qgraph/0000755000176200001440000000000014521151262011533 5ustar liggesusersqgraph/NAMESPACE0000644000176200001440000000645414430573263012773 0ustar liggesusers# C++ stuff: useDynLib(qgraph, .registration=TRUE) importFrom(Rcpp, evalCpp) # Rest: export(mutualInformation) # export(vein) export(flow) importFrom("utils", "capture.output") importFrom("grDevices", "hcl") S3method(print, ggmFit) S3method(plot, ggmFit) export(ggmFit) export(pathways) export(smallworldIndex) export(clusteringTable) export(clusteringPlot) export(centralityTable) export(centralityPlot) export(qgraph) export(qgraph.layout.fruchtermanreingold) export(qgraph.loadings) #export(qgraph.efa) #export(qgraph.svg) #export(qgraph.pca) #export(qgraph.sem) #export(qgraph.panel) #export(qgraph.semModel) #export(qgraph.cfa) #export(qgraph.lavaan) #export(qgraph.gui) export(centrality) S3method(print, qgraph) export(qgraph.animate) S3method(plot, qgraph) S3method(summary, qgraph) #importFrom(sem,"sem","standardizedCoefficients") #importFrom(psych,"principal") #importFrom(lavaan,"lavaan","cfa","standardizedSolution", "standardizedsolution", "parameterEstimates", "parameterestimates","inspect") importClassesFrom(lavaan,"lavaan") importFrom(stats,"factanal") # importFrom(ellipse,"ellipse") importFrom(corpcor,"cor2pcor") importFrom(graphics,"plot","points","lines","polygon") importFrom(png,"readPNG") importFrom(jpeg,"readJPEG") import(plyr) importFrom(Hmisc,"subplot") import(igraph) S3method(as.igraph, qgraph) importFrom(colorspace,rainbow_hcl) importFrom(Matrix,sparseMatrix) export(averageLayout) export(getWmat) S3method(getWmat, matrix) S3method(getWmat, data.frame) S3method(getWmat, igraph) S3method(getWmat, qgraph) S3method(getWmat, list) S3method(getWmat, bootnetResult) importFrom(reshape2, "melt") import(ggplot2) # importFrom(glasso, glasso) # importFrom(sna, component.largest) import(fdrtool) importFrom(psych,corr.p) # export(smoothAnimationList) # export(mixGraphs) # Functions Giulio: 
export(smallworldness) export(clustOnnela) export(clustZhang) export(clustWS) export(clustcoef_auto) export(centrality_auto) export(mat2vec) # LASSO STUFF: export(wi2net) export(EBICglasso) export(ggmModSelect) #export(EBICglasso2) #export(EBICglassoCluster) importFrom(Matrix, forceSymmetric) import(glasso) ## corauto stuff: #importFrom(huge, huge.npn) #importFrom(psych, tetrachoric) #importFrom(psych, polychoric) export(cor_auto) importFrom(lavaan, lavCor) export(VARglm) export(FDRnetwork) # importFrom(sendplot, "xy.send") # export(qgraphAnnotate) #importFrom(d3Network, "d3ForceNetwork") importFrom(gtools, mixedorder) importFrom(gtools, mixedsort) export(qgraphMixed) # exports for the BW function export(makeBW) # Extra: # Extra: importFrom("grDevices", "col2rgb", "dev.cur", "dev.list", "dev.new", "dev.off", "dev.size", "jpeg", "pdf", "png", "postscript", "rainbow", "rgb", "tiff") importFrom("graphics", "par", "rect", "strheight", "strwidth", "text", "title", "xspline") importFrom("methods", "is") importFrom("stats", "BIC", "ave", "coef", "cor", "cov", "cov2cor", "glm", "loess", "pchisq", "predict", "qt", "quantile", "rnorm", "sd", "weighted.mean") importFrom("utils", "browseURL", "combn", "menu", "setTxtProgressBar", "txtProgressBar") importFrom("graphics", "plot.new", "plot.window") importFrom("stats", "uniroot") # Parallel: import(parallel) importFrom("pbapply","pblapply") importFrom("abind","abind") #export(as.ggraph) qgraph/README0000644000176200001440000000043514430573263012425 0ustar liggesusersThis is the developmental version of qgraph. 
For the stable version please see the CRAN link to the package: http://cran.r-project.org/web/packages/qgraph/index.html For more information, please see the publication in Journal of Statistical Software: http://www.jstatsoft.org/v48/i04/qgraph/data/0000755000176200001440000000000014430573263012454 5ustar liggesusersqgraph/data/big5groups.RData0000644000176200001440000000125214430573263015457 0ustar liggesusers Un@F@ \D!*e:4x-U".y< G .s"A$9㱬(FS4-V܍ߛJvʮ%-o:LEI>f/GD>#-#M{?Tҏg2BV&G10TpsbPi)+]|}|/?ď#XAk-Dw8>~B?_+Fs6{|pnI斡Ʒ*q*{>/%~~+_[UEA#?:٥ӏ'sJq:l !|7z>dPi.xu/]|}|!~{|I!ߓ_˾#)^W9Wyh[6| `n*-5WiP鰟.>?cX=Tϧ$mtJ/]|%?cD/y>1ck_mG^'pEGZ xM.kR^Oe|gp[t|tM|e\^_|mDi/rQ!y~XOhI)Q')'-Ωx=`/3@[oЏMAH_cuWk~L?$:ɗճfGZ/|duQvm-gv'}rz$.}_י&&y͗OFv_aVcM/oqS^[8kS/׸s[ß39p-.uo*hoq+'_gXYlqodc$> ˁv6[ܷup\;ٟ/rW":=YG['Fz1|zo:,̞TםhMGYOߋl]Gfqio[3mC~ڮou*[Xm'8<-!7ixaKυo_>m28[M/ holr$׉voWny~9%GڏwF~wh3ۺ6,^^-ߗL7?7u^Xq1[f8ڿC{u׭[)׭_izW'Zj-#ik Zx^ocmj'N[Y^zi'tѹZhyʼnh߾u淕~돭5nW8.T#oqE:V%bz>[UVMx?F'~[U>v{oz%zVM_\k'h:/zy獯02yl䣮?Pus9htMp;~=qo=XZ-]͉~D5m}zDٟI;׼?_|z"{+&~Ao_K/kǛO<뜯 ىk ExFtN$Z z&'?kqq'A/fv%~O'4~4X7Uy>7'˳klxws[BzMÉ~^ޮs,?zjĉyuԺn3z#&~ny蟸&i?w?}T6W-s5.\O`Om>Ze[|_}6>Ŋ/m>q?nf WO˧9_yϘOhE[_f]G869lzm)vtpާud[w=ۺv&s\WMo.灶]Sܬ0yujH4Cw[G|g|] -[Wt(cjKmN̛skͿm^on5KfOg綾|]Fϗkܟz[+m^0~lunikt{Gj-+Í̋~omr_\YJ|Ouխm)[Hۮk~V}<'iOVK}|vfۗlJ~{oX[(usͿ)Z=W|2:ߢ=-FahvB;[P͇}ƷWc|-ıO?wDr^_B}VQKz=[=_Yٖxb~Vog+r : -fC߳_xk+K}m[N^pQߺ;&]OGZ=gi-~|Q[|>Gb_c|ٺz{myge\v=G'}wmq$۾xW?OO]IjCqf~Z;^19|ou>_+>>15nh&ͤonF !x]uL{]^UG[Lyp⠭C2*ymu$/էqcGَk~k?$P+%_ԯŝr3OZuϓ:}tU>Kov\8{\R_?)_n[?S2ygVCPQk5>?WK0)̣ ?ω{ϒ3_W=Gu;]T]eؼ~I/绬?|M|TҏyƵzz/2OͯO̓t Npgkk~S!]ŤGǾO}}OmƛD qO'm}wmK0[zɯKi?m^kŕG9xq6Zy:ok]g]k~.=_,~VxӉ-δz y,ߐ\vֽ$;|fW{.ZߞRR/9ᬯ[|V??Q<_2ppDK|Y=Q|c]Ny!g}nziC硧ߐ.'5ѹŭSyDv+Qw;N~Z]f[׻q?L_ϭos/}}OKH^X;#}DuO:svSm>6?lqƍ 1{xMX\Zzjz"hf Z"=5nɏ__ɯyx;_S}O[[@m­mJ~n-߄O[5V?iOŹy;mu?ԮN}_n鸧|J.K[=8O ˏOSyi]uk_*IOW?oqlqԟocC[<{+N ].ޟcKt߯T>yM7>P|d~I?xz0]>4N8xk?tgxFt$kwC't?ɓm'1C,sAoA7z0nGmjJ㺝Gg&__}ݷzmFMncN|w["?ן <}_odqDZA?t?`j?{n6R6h+q5.6m_R֓6p]k+q]wk?s^^ֵycuC9?q0iXҟϨ[=>G]H⺍vMu0vzڮ:\+e?᭟\xc 
|:CIzO[j9nbIw]k]Ilj~5/u6:[G5\뛯2~i۶[k;ms>Iϓ/k[׌}ODz?Ɨz584?Լ_[a}c<|:!>]9M6oZ7goS~-]۷|ޚ7_}>֓2_O>gy"~Fgzn]X;xx~KϓSz_ǖ/>[ [yڵmINhW]2ݷx߯YnOqB*!ig)yDo6O}|?6.x;?|Ipn3$߭M?//ny켎<O10XWzmyʯQ[ʻ-^۸.lmq@o &oIƟo/杚|؉q!y9ijl\'Կd~Xgooy_-NZ|jżQ$?m=FqdHv~-=sY( Hhq65^njokҷvYZ\|ꕼu9&m= {m\]k->D?뺰])m7M~>Xgr%֏Zה=C{2GSH~dtۺg|?R?k6<4?V|߉sŝ}O_V~ޠG-Rv0Re?IUɎ9?]u{ճԟ- _=sFO-~(H9suƙ}͓ھZ[瘟_$ğ>+ ?ov6ZmqVMn\[/ _uKg;i&o[woJW~ʓז'[QZxg"LOm~1b끨_oًNw &e|i9s~M/''/9Bp#A㒯oo{?0|O?3NI\r\5dxV$|D\3>sSlMԚ%ߑGm7KchuCzy m>mK|5^ue$g>O6/}}]}'*P6/ qC-͟4֏"y=2ޮKy=iٟ{s\㨅Jz꣇~6_9nu/qd=0ރP\{aԍgϤi>V_9x ZW^87>qv0=v:81Nqz)ǡ_8J~nQP{~c~v`=o̾S-._Z=-U^S\I<%>k}cKqdy8Sm~pեDα#͋|KO-=[x]X~SkVh4?y/ƷuɿsԦciݲ_p_FDkyx'GُZ%Տl瓆S>ۼFSil6Voѵٝ?q 'Z=ɱڵOꭜ-NzoYӹU?ym~y7~_f~zct^^0>_+v3{xֿ(hS~vkû5 J;s[kym]iG[ {O'g3u cygH?VW$yɊoGyχ[=ߘ/_ߖ< |YXIu:pY|TZzGH^s;sL_r~O{Z&ٿ{7>vmu3q篖/J>:XOWH[.峬1ڹ =x$'?vЮ |QWp([ăo0e{̣}%oS<]g'oqw{㲿3eݭ6=]BZQ7~Oe\QX۞{+ #|݀_yomz޶m|Ǽ;g+ᙬk=Ze|}΀j/[I`h#=v!u֫]~Γty ',]ȅ'ZnS;OZ;Z'p_qԼn>͏c_qk/y.y/׸~9>ҿön2|jT֟dZ_O=_mFIGm!_sőoH+ֺ_u|;OLVgx+f?[Kk7roz]-^kM9N<'~r\cvI+KAծc ѧiϢxjYksp_^[]o|u67'[{o`vemюx峭l_*Qy%߳iho-NZ'S8cyvLV~ucЮ6(\ܶ&wk_cU/]׍:oţupW D{+ l(uZϑ]}7~IJh[8uG65|io3RS+K8ց:Z>ׇ0~B-eN5~[uܪ7+ױ'Mt>߸?P_ef-knzmݽ[?5oזgɟm>6~z]o4K>zNVl\/Z|yl@k[%a6_]͊C?lW}\6۟D{qo%lxNhx^Ð:ϓx"~+Vw$䏖;~۶Pk[;gT7ߖu۹ޗ3v0=GV[ګM.DIZm6~vq=٩g~ƕ='-rc>h-t8x<_)[oqBq:X槶2Y㪷[[X4Z;h[9٧/mzj=|iu]~/k<|zSՑqqaqR[_V<]Ivg[[rg%vpuקuHmeyLdZ9 >Uqsqv>x>}ō5W:6ɧ3-Ź?[~7]x ~IV_G>ҕ<~d?ß~__stݷ>e~|k@Wwi4SI-Ϥt_׭7[!w뉼Oٗ~uai+#yHnx{$Yz4qOֶkyZW=ݏ5?A6F۶uډq~olu[ʕ _gć{ۺ m!_h>ciw>߹~O֩VO~@=ȾQT|*OK g-|_{c]t-nk!Ԏg|]G$gՏgt?Ox~"G%ٯ|ůVCC[ q~~~[(l̃qiyr]-?6y%O:k&_54E=&y>g'E^~| SQql_Gm~8C9twomKv}-H_<=Immv>z}/$y~[g?DNAǼXg>u],#֒IO95X/῭|ćѱJ~+߿Bl$i@}!M仉NͤCl8kھ{s_`ggK7EgZ<*c=?jѵ.N\>mtpkm޾[~Ǡl6oVNsk{_[e8aopkG2;i[K-W?49[}|qV{oS~uV;N)&ً8`$h^wg^o$Oשy_|gC9\iggV[O>/ƒx+}O"|׸ͺ^0I^=,|:No]^S~o>OOzҶN#^NZqnڝE,Zl|OxL9ڸ qDuC|@@ϳW~#kq+S\lExI_%$g[w[Z mr_qaqmb)Pg~ݓ6.[ȖEzݿ=Я{ⷍgz;鿮*e?V&__hm^hu~ߊOGm&qmܵ~-߬uኌ"M'o{o[EIOmq݉yڏ+S>W[qS-ء>+C\`w%? 
|}Nvf?oa-o>7# [yZZ,.V 2Z׵CRYҥvO^o㮍g{nuݞd׺H]Tk][dgKOj~Ot0?-zN- ~w/Zw6r<[~l_v sZ`h ׎_0At[\qkM/m|-~Zk]oKLy[W{ָXq).sˈn_slҷmً;hd~CyL5&_R$Ums?п`rq dKǙE mlmw5+6VOuз'p䓞yo꿷 t[Czom֢]n'gr~6?4c9`q)VK?2 Go15\Ƿylc[yޏ]~ c/S|81m]iJUO?G/^͓m}e^V1e4L?m]Ku˭MDu}4ǵ$_j<׊ Vo~.G6~V߾Gs73>n;%zEr~*'?H|y͇m'V߂|Z߭a^۸yx0}[=BLK9&HV?͓m]Ǵ䧍r^qk~q I _you@ҵIG8~aqF^rPk.[׮G,o9OyL\#婵m3mdy뼧kunnm \oX<>Gڧ}9KGry:3-.-3}Z:ѾC[W޷fez8H[['~tG2 ATPg 2ܰH |JYݟs6ĸC|OڵN91ɶ? ay3uh8mꁔcٶ^juWˊ3?gYk|=Wk7&=[<a|ko OӰF SgގZ2=e8l֟m'׼ir:iq_~oX>.x4=JO$^Q_ysJ9׼a;ζ#S1]dqO:Γ׼XW/qyZm6튯->̾utּ s]OO?!zay?ŅmoqDY?#}Uɏi@w,rYsɕ|e9W<ʼzޗi|AA|)9_ķ鹔COu8NS:䠼Ҏ'~~1z9?1>̞?{OD0\3~>]#xj'>E;z_)sXZ=s| Oט/i1_'?%}=[Xdq|ؾY5[y]N56m[~>,~b>D:R\ <5_ߋ};-ެP}m͞4iЯ,|庝! ̟ꢱkouD.2!}Kꀶ=ig}g߄[ygz!~ܞ%i=t%ܔ:_iO'74?=BKo=q1?J>^*=-~9:ΧڑWPoڜ~e~5#zb7@>yz{m.l=0OOn?#C*um8/nqlwCx3lOmk~麡[/zNk9$uaͧi>ۈNy*qל뼭W/n6* v?zlI[{~Zߧ/N[{ruҳqnzk:/Oַm^ySμ&?nmbqcW\<п{'ţϭl{^N+mzO,uoL[m'iOAtrhHa˟[}-_o㢍oZZVjii~_:>[֩|I72vm6{4c?_)sk'~l\tۼ`iqumub돫zЮN6ߊ?fv뚷ojW[C~lv{h LƷ5Uq~w[?w<hI[?yZgpk9?m1ׂ[; $\D?x]ƍK39W>Q?+^f[%?Wwh?=%r4om7{4_HZ>|֏`myď ŻWooxd:q]7ٚ= 2=g?\׵y>%Aҳ̯ ,k8uI7nq=t> Im\dY9_k_ze[wy,q6_COm|6Ϻ?{خo`Ob桼bz^ђVv_'0?!n}8[g?>6>>߭?u!W\5|=ѿ/Sy֮?hz޾B?o?:8#=[ϧm[u֡F|Ł& [;Ə߇k]^viv~_EvH=S< M'[:|M^hvln~gz1;x4inN^tqٟk^qaR\Z{S-.y믷~,#>oW{§SmƉOޭ^OwY?߶xd7oqSHs'=ߪzG[zYݽʵ!kh[-k}_?!y.[ڟoH6ڐr^3}D#.7m/e׿F#~֊u?~g+W;|4GK~P_m-vԸ~1O<'ݏuEܟqZ-.*^-s}|]_zeү?5._+{?㿵ۚ/;w;3;*e}ڵ_gIZǷx W-+8F~m{·Wbmyqj[к*e]d)þ %"?.!]ٚ<ėK~*_X47W4fqYoqiykU/^윍>Ou iܚ?'9[d/v|ݗ7Nzj7+Hik[߾]@~pUmVi[m_m[yxk7@7?x7&ܤc~վ7󿵄morgZxֶ!?im %̯x6Z|$)Oכ]}vC}g#d[ϙ݉ W_?#i蝸/xs|t~-IZ:'/^jyXn[z{n_ZEҧ^%x~ӾVףzeh.׋um \ |7}6xm>]^ _a֩o7`q36='~,=qgt,׉'teY֟~8+׉ʿѣv.Cgwmq69Һ+A6_Ϧ?'|_k׺?mfZ~k<;y~)m^͓ٿ=yV?4/ZKﻮ?/#yo}퍿#xջkou*=Fݗ1<#}qO|/n> %m}K*'eag6l ?a?|L 壸mӑR?{m]}s֝-~qnNuS!xQlj@ސؾn(c~G,Zy_s>'kym~A=ٹ QVJ:oywZ#3_[\b'i;~B;:pFx~w?w5Gtճڳ-OjW=u-yCoR\zg4-N?;O>~=ͧ߾}+OӷzQUӸD)ϴ_ɭ>][}<;l=[}ٱ]|>f+߷7~^N?o$'ŭ<7v:m>'X=o/ov]-̾ms|yPq<ײ|`q&G_7yl~G4'l ?zpO1;>䇮owYq6iޔo;pqE8m]Ɨٻ][o_8ppIoS՝V?meKNė͉k|tۼbԿSn)XA 蒾hlr|[[^$xp5(ZfoO[r\haz$~8Ҷu%\nIƗgZIVg%O_w 
?~ōYߣ';y5O뜕.Ou}c|iquS=yg|~ZD}Kyiƿ_t]u^jŎ^N#|#nz0w{V Isר:>ԏ : oqNtd\}nYkcB<њsED?ݯi[&_+ ww-K^C}ǷLzlg,/2 /U^$ܵ/,○m?:@?E{{Wv=~wy_y}6_Y\"N}'=9^pU5Β_N+ҿEd݆tŁKǼ#:uC]7/vmquWW=uwmq_,^IGzZ/}ٗv]YJm]^ټ79nŃrr}m~ms4_}*^lm.v-n7=]yiyE|V5իV/-%}n1Nkq/i%mōEg[ٹG O[Vk=+k%:}~~ⶭ b|{޿30NsK[qֿwGyO A5~Z;Z;O~~wkՎm{)Lg\W\][~nmqA||[:iy]z޾C_鳭MvNQWxu-.oq6O5mO[FҳWHu{am[ʾ߯z#["yz G/:z5FS;gufLj~o].]&R fݯ[:?v?uޮ g˓gj+c*)'?.3 #O}67:W<ˡ={ps?oOC9/c}$_m4;f?]ސ߭'9K=9Gom=q;/]K/VgCd1DpGo3m|msʕSث%>ڸoU8'53um<~ja峍Wu֍?ߕ/Imݔ^|7ѡ-NA[Guy)ϛH~6oqu}/3[[%o/G0Z\kJ?k:ix7U?w~wZrZ};oLw&W3)-uI޷h^VW伦#6^ _[BVGuj[~lkzs7ֿvnlځ浺lwlsnys֓槖[g8'[ Oo:wҼ-$į;ɕ<|xnOy=ow7k|y]м^iv[=c/d}[׿{F~T&W|yc[Տ?/x1^G3s1ǭYiʙm]D{?ܮ}}9n]ٞh|'6<9,Sj[<i~Չ xqǷ8oύoC[gŕn+vn^Skkz/_?bK^9z$/}-=_mܴW`7/a%?:+ӏк.Ϻ~Z 7^ZύV$="k>6?DuۖqIn@[_Y]kxgMu}ݷ=1_^D[\G^-^%==}r~ik Or6> 7 nz}\s¡^mi5K;8ω̻ s7y2͟W\-[883ʧ.?ҹ#Oxy._?ǵ^9oIXђ-!ooD;p6^%O꺽CmgqOlz3)o <|c,8SkLovs]|_۷z+Y%Ⱥߪ<֋}/p~÷vߔZhVP}ct}r]I%u^A[%=[w}m}f8/W(ݕ>mm~m=Ath9~7hܭ=,N˟f"> ?oi<||MSk8{:~^h 'q?~|Vv>oiֻ+ޮ<'?lmk%NxZ_h[0Vj=u#x;)5?~5(Wg{SZӔ#9Er߫~wK~Bc鉶< я~;wL^-}{[hMmKg$~k7u{R?\}֯>MYe;ǥѱJnUҥk/q@~s|=(N)߫Ƶy+'^#ŕCfgVh7Ir> tAo㼆ӐmS3ykHxmv{}rعYPQ2NhO\ouo>n<|֯m?[!7{{^W5{)u3ǼNMpï }ۗ^c{!OF8~v!/6V;71k~J>x%|ny [}xz}ֶvO9lou]~~"'V7b|]7%= o_Z<66_um>W3?MOD[=Ftg忍3[=қD UV}6qqė8y_[3 9O\[|zZbc[|i%hqSIgœ7_.1>lŧnDo͓Żᰭɏ,ou݁qTOr\4KE~]D)L ˁ^K߯oף-.$;ٿ;uo.[3u}a~SU3[uOk0uV%?-J3/CY>pa3_Q?#9eG;[Gu8cz&}|n%>ޖ_[7-e|o]+ηD m]m[r?[[8<񼝿m/_|*vSzey.髿 _#|ߗ}v_$~/ҡ_?|](]Om2:co~G _IlxG~j+_-og֚v_z]W,~^x_"V[]ٮںRV:u&ψwZiׇ:>=˓s^:io'cze-]^|[۸r_zFyyDy7o6_k`:Tm}pyP8/wu:'9V?0 o={ҡ:5sF>Rɖ^Fp3Ѵn3|ʞ9ߑoM_淭|Wk'i6\:SyoqE3;hxh5exiĵjo-G,uGLT߯xCb6⺕'ǭyp2m>5;|kK{/N\qO\y'y_InWnkurk>_5dsҋ/X1~lxߺkgayxҪ<S>[mo|'ZR8z~ũ>Yڹ'?⧿5[=<-iͣm\Ik>#0Ҽ'|uly&Y]s/-[lznq(ZCk a+naGrm\~^^- Ǹ} m8T{o%kܟ*=_q1li%i\?m׋+?kzh{nŃuS>?_o`ÜpSZ/oo>VbWD|}^7΋NT7\jW;qOv}vvsRofoϓ_cǺJps9G/ܷL>MktomonvVI8eo㫽N9Lqk{ɯ7͗t:Z0ߞ#Z'h:miTg|Z]tmqwM>leVauT-x͠#&=;-_}m|qu i6W?̿ڷlU?!C~j뤼o׫>ڲ^['s{Z?$Ao'߱^o3>c=5˿Ԇ]);߿l'^^a/>Bw('_\Yޭ9Gm{O+:?uKrH/*o vJgDRyW݈řunr|§#Wu!F 
'_K/yV:Zcx%sIOzW[}Z}2IOәL/}[.*_7/ُZki>ZOp{~fy6RݑyӺMk_gXV4qTO1k>|8:kh[W~fKmdVSjq/,>~kG9Lu<Wy?%dGۧvjO' >V/8Y>(^:p%kN~wZbut.D~rk_SiO6ou]n4o^h6-Y}^qnh 7m=hܗ_m=Bf^꺤]gyvc|S#8ö>h㨭m'Op~OfSn)9ZO8ny$I:ÆyM|ޘS_!?Gt~1?|Nuև+T_ؾA]y''[dpƵljc\>c}6Ogt߰n1߶ƃ}r[ikbi[xкMQ\j[W~itZJ><m}>gzW6[?|d6kQL'Zv]H7wG~;_W?9߈ﶞ~f>mc|-_G ϿW} y.~q_5?=WM34O(x&>.'3z4/z3ܰb 无.B>|K=<4g:g oI^[_Y[[]~wc_zճ[ߙ4?8u6.:R\e6O\mV-ҾwB.jdsrx_1M_OC=$oD;khc/HS'[Ô ʼnκ>|InθCi4Q=6rh>pm qg'f7$qk^'}G%ٯXޣv=fdӇ9o{a-qKM2o*\ҭC}Z-[]/~zꦶY3UlqŧKw+ym3v?uގo񦽟}_?Z{?)z'-~soƃE뇄÷Z-Pi㜮k\춞zscyŹ6.?՞S|hm#)sM,űkrY8Xnqp[ܶbv~w&g>uxvf?UqDu콋R־$C[^k^כ-iK:soO6ܮ>UvkOIT~XZV}=O+]_:Xnx)NOiXѮPsKgm~Mۺt[{}t-~u̧xxJ31C)q`|ܯߛHz4_9y|.b]oiߟm _xθ.Omɶ=y{NO_9.ǟ?U AZ{RGj%?'vN6?O+ _*gҵXx_;afS6Ot`Lc~B-GWu}&wPut>7^|VN#_<Et9'k=גtv}fu훵/<@R<֟ky/^ΔEG#@Ws '|z\]jtU˝[;ucҿg ZyyJoŷ'ykqcF?Qz=ϡ 7VZqZ?6OFuY~EɇCz'>h<~Ey|g5My4O^_ W~{RGku_k]Y<7ѳϫȩNv< N\N=m/;vcdZNzλ|ݮORЋjWl7[:ڧ>kJ[6| Wwɮtvȸ/?uԟp`OۼdOmAxz/ǝgWL>pki7.G}6y K_8O\>ˑ\S(Lk]g|X}Bv6y鷭+skwz!Lo$Oңy_۸]Hݫџx۾ fڼtoپ|w4[yZ|4aixE}ZUtjY/N/kNk};-Zz9&uZ>7nO6OY\TCh5R~IccC;?M6}q >MGї9_:{,_v~ѷ(/}3ك:|~~?@nI5ly=ǺgsCz%nϣZ:_r\[N#nyW~gg'HQ?oY)mŶn{{-f8JW\h;oQ7os =?7>ָHyնoLV=ŻG N~[?4n%x{ Ծĭ~+Z$suoI.3_~Wg[g6)<+_'[] 44O#4I>_Nk]@[]tiY:eV^\_έy__nog\3k]t}qWسvhڳϭ8kqpSS~?/_އ3,l\Vт?z')oߺ*xݗ>/0_-/鼰C(YfkQ'жuٱ=BSδ'\/{m\OvK9J}OիGBI~/ B_-W 9q6w}oz1ӹ Ӛ[?{Z^=lɳgM6!]5Qu#1= '-om`Ď}^O?))7g5>W+x>'#ژGIDlEa9Xc[DKuQԦuc{'[dT.KY_wOŇ.3|#]x'[}^ɞo|J~ֺ]w^?q] ^ںgԶg>}E9/~i/hmzږuoOUO'=7!\LV߶i }՟[;|Y[ymSAn+m>g5j6?/-Z*޾㩟%-?Touߕ_OQ?M[Z^mVKw=\ZruDҷ|:'>2?Oz s|['wyާ~DC[x^OI}[C>?Aho;szN|_ymibҦ?㫥o-9wKI.?/oO7~.*=ZI=[=Ԫu;q!7DCS㧥8y''rĶh6Έ?xK[>}`_Sw lmr<ޮgqn"Er#R{VqDGLѺo<'AC_x{/6{+v\~mu-S޿>_׮wG<Y7VlW=g9н'|uX˸^^ggil>6O[B/4w_XXNzY?0m$=?[ZQ?+%Ohq'V>8+*̊oo3m+NջAhw+5OO+SUKOg)Oć ٕm|_ؑ?w~m~һA%6N^~O¯?&?F爓T~2{cxsү!_W]ͣY}W7k}e̯Gys n'8ün_ʉ)ѳe}/mC[ x;`ܞu^ƁYqj'ݵwj_Ώgx֏zezt|m=<-O xt-ϵ8b>W9B6x<XOɏIs6{vk]3 o%23S:M/-k>+:?#kSvZ閏Ws^Wϒo?|~O;$ݔcW>Ⱥomqqkx9qB8~*/&=[}ۏmG=юvx[S>oz<>k?pĵ4p!|Y׺hO=_8/֥mQ] 
VNm_a_޻W?]$kͯ:Ku]7ibZXK'}KcV*Vok}amsZ?[q[}|dw_$w/j_4ߟl?}H+?y<__/}>bwH'':KRÔZҏ#>[sm~ңg'&No뢤kHk~:Up]&;+:pyp3ʿD.y|չDm۸y2mן#b-/W̎I)jAgulYέ?P҃٧'0<{V-?)ZKYcկV?j>|m{Axʓy5>v-^]Ou[zynt[yjw)W-|f'OY0٭ͣ]S;'U6淧g>On|6O>X6_yV*}ּO|[;S&߷u sӋ=?Пe8D&m?yggOyWwAD־$g'_|'ݔDڏz_ρn U+y]ӹ ŽOr ~N>zi?ljpp8W!y[&ݜ>myɷk;o'[Z*۳.[ܑ<63W.vA;Ouf ߺmn>ǧ<Cx R[%ߺOoOi<~ʾMm<=ř6v>Ao~kOuc;S|\A|X^;o>OۮZ}sb5)o|ߞp[q6>nqb4os9nͧ8Mqz艹 zOyo㾍[uwݗ'xNm[[ {*SySZk?3sէNo_vmF+{S8}?Z\YZݽD{D[|/n?S1~O|s3#J}^yuSqsSq@|\.Q{&1nGedoz~"OO=^͇̏Kb|m<_SHWZ?'D#I}G-ASu܍vk50z3Am랚o >_[fw?kOKmu!kܡrS[u\⺮zݶ\ӡqjZG[_1Jy?Pٯ#NrPO+or@8 1uoqQ?/|9M.⋾k_~{KmK|׿q3>}n\Y 1܌/[+X_J~ W$j[n6z;fo-Ϭt81>};>׬uEݾw}p2oƥcEgSᏽ~dO~ųdYf_o Ч8O[ܤ8zn[g&֙Wj6ZJ=QG[?ߌSO懫|E5xg.4rų>@/ry;_+Çu_ZzѮ˿E>g8exvnO>p7湏Scr^uw>RE'[;:2o?wmވ9g/tp|>O'^t,vmM}uټbeCy s^C-bI+uq ~hq=ݯyo wmim=ڮ[mkZWfG׻n4OM"nNtvիsySQ4~I:lܘⵍҥ)nuG?3{D6p>4_gKzqӸ:Ň?ٯ˼Gֻ-S<}9_83O%?+됶^$m|]Gh=;*kkAO6?WMq>I>֠Ѵmv6߼yU3s٭L{i1I)9֮.|Ӹ<8œ]\|5~Ʒ.gX^_5ﮞ Wϧq}%"l}sV-":gCһ Z>-~Z<2;޵vO>g:oRxn^8onޙ]g񤝟ivYSC=/8ڶwCPk4'8nIuG 끫_U#eJ<{]N:5?+nyL_IC S`USOsOSz9-oh[u_=.!OkP;p|isD-LoO k_Q4~~3-==euegrݦbgz_~83̨sByAyja|Wdm==$v)L޿.}/Pڧ]N ңeZ7Y=㔏.y գ4Az}ocSK|hɒloֵm8+wSW6/~q}|ŗ.^'g qS}ZSy眏6?n:齍i~laI_sǧUq[Gu{uyޟ0Oҹ'?8o 4_~1Oϫަt%=WfM<^(d~-h[x2JL.I]ܠV]1ki6|8[[օm1]=vqN돷mMT[=7>v2ŧmOZ|*o-VLIp)^ߧil^7wYzh yF:]su<]<5wi8=mw#/'h=gxh:ǭ?m߁T>w+]=M/7:q/W`U t_Rxoq9G}~lB`ɼgR|-]{5n~_+c_~_XS}2t>n#Z> >ߗJF7煶V7er{8"Q\~6wV\x<dlC߇3::_@|b~DIέTg<)^HǵݮKm"߂<6_G⮌c~Umz~ G.L=~vK규gZ7R"gZ;.h[<<ļq~⹥Q=Fv6&=i^/g<)Њ1.R;=EX#y `Ez ~SE~'7>1 zw_H{$9N2ȋ_$-ζYwy_'/_v_cߴhq;YL9vnW+OB}TZ=ƍ=7P4꣩)gk[ZiuS!/WÜ6.ayu?vqW.S;ש~ ]M>H}xtvzꇩ۵ny#9Ͼ3/6f?+کi\ю_qmu NnV9VӼ`sc8SZity)̏@q~d7Ǜ^vW%oxfiGA/U VٿwGe?4mS}e?y>m%0S}摳Mgqŝ~ħ[oE鹰[}OIq>g'lݲg,^ӸY|ZCa_q#Wɤ-ZG3# g->L~oG1+Ϟ!zM}ŧy 'l6)nh`E;s>g2}_C}*=߷<ݷ ow]|jwzicJzoOUroɻ~w5>Omigh^sܿj}V,gq3CIk'4ϒ[>o襜sݯ9_kmX}Ƕ߰nĵOj>4~Gxgz݌~Ӷ(O?z4/LG+}?Y7l~m~ eoUz_^ݺg})n[Q8kql~bqemϯi_qW^5<=/`Vם[o}C^1=֥Ϯ~յ9~ $Gޟ4X'~~!zDw?E~NZ]ֿ3q8R>r%X=tN}paoAoZ7oq-%S;m|gOZh]fDZ>LޙV_S4[0_Z'Lk6ߵ${Wg>U1v.`Ǵcm>om1;)甏=렶z>y.Q׭M0@Ox9勮R]w4q{s)?$_3{l]o?y_V=OLKCI6:kawƉ_iw&y>G 
ƿs_VG.=s^淖뫾/v٥rxa5\I֞s;'s§~U:~Zl&O8ڕwW5-;綋׻kϙ/44YR>۶qf<#}`?v~}5vg޿t;/_ǂ-+ôn\i컿~xm9ikh}9CqhheϾ个iٔcV.w<ڴ~:>בzꇻNkǦP'zD%͏g߫ 3S=^oR;~s^JKoTμ&{-r\eߤs6/|v,n󬯶#owoSv}gM-~YG+5+C|zlAKmAow|Z'vq+w$솸mt֙S|4&+=y%:i}>E%_"@O8wxoLqmُ|om ߇~mkzHro~QkG Q$>yGES;Nik-.^ռ5[_>-⢝ǡޏ絝m+m.r߄|V5~}dd[]>LO/~'=UƕmO~l}euCguïx}lb^'}gqWUxU~)i~[N'^]?}CXi@voD/~ؼv#/#RMׯy&?@1y]Xkr^kEyKKA_M>ه5{~ |ɿSl>cu"`y;-h|[Zg3S6ңs]x)}KwM}Jo-~~J~n-ӆ[ge7n^]y^:^x^}7zu;us7owS~Z#\xMf7o򥞩?o:-uM|6OmDnkq?]}Öϒzoo֍y?zizy|~klۼwzkSi|ݲ={X7MOyD:OyM89UKCz~`ǿ_1}NۺmkʷS= mkyOON'i8uUn|vR\<}:Q3`އ~n_Z̾ޡGZq_*~/`\]P]ծuHvoe>_!s޲}hMBWnۭi|̣4l9. yZ\~]\>'7_x֞mQ?{K:mkh,Zou/vmmr vTvAoqKokznާD߇yc8w;#e@̧x8HvM>mEz_я%(yg7MmTGkR$S}x?e^Q\ެSuS{Z>0no+gʗL[Zoy[m^$Y %-`!gyiQusOqf\W%_]Ot|1O[_J~y[:'[כ=ߖgo{mfvG1g㰍V3gquS4O[NkLqe7z}/_G<)ti_ OYǿ%l׫_6W]]/#8͇|aw`}S^h8yy$7~ vR:S :hOZΗ4"~hck.2OWICI$o#sH)w1wLnO_G:/mnkQ>sYjgt]zC?OM4{_>3\5}HꉖN>jk/w;?0xT%1?Fxh{4߄$cqBRirW_l}S^[7gaqNkkrZ},ypc~^s/7q_~_? uۧ-c>okxixpeڵ}Gg8`lT͎!'}- ,C:g ;^,xnur[gZܘ ^zMڷjx^~aAϜ} (֫g[oA{5P#|io+_뺡oD$7筝.+J H2?Н`sG[h:?ۼ׉WX'nniq!~v_/ /Q_SܸnBJ߮Wph}֎pk6ۼ5]_wm>_◮;z;Kg8D~:ͷķ/K@>#Z~"u9nX0ܷuٖK_7?Ck[ߘ|ٿ+2.4`]h=}7}Ps6_̿yG4~}zl䰸iVL_> Y/me/ݧ2hv=%՗9h&P_q|h_a1tr5VuExeƅ31"gW'+ُ6Lexe>]ߒvvwv$oˇM]ESZ;S}ѳBAߵ~N}ocXezk{+h6;kG^eWkW\OyWisƻkŞpեOSz1odgWQm!\jĩ?PC~<==n]J~vqx4wk+9Zoӟi=@ϓi~+џ3#^[U*yU\U݇ʽ*ǭn]Ծ}\>?lW߯K-Njjin+~Mi*n뀫[ާ*vOשqWwύyn]uk];OʵǦx2w7V8唯%{^O[= +3t6Ls/+Ԯ:vqc7&<8u0Ԃqdٺ{7?\4Gxii7i|<+sm٭w3Sh~T|\Gݯ~vz{(oL[7N4v}Nɖ7 ?)zjS=tI\G;'>k[}w$W&=Fo! $3}|g9كOAv1k#fkmuCOot[f?oZ_eK[Ih7C_w$k^OU[=z[Ѷz|bx:0}'=_6{hJ>@=Tλ[ؔ)mWz|Z/[7xy >$ wfM~|6run~x17+ڌ =~{l|S}n.:=JvmZܷqn|8}i΂sٻ:ouQ-pc?.:r[?+[akvʼ=]wOBm\eo}6W>R+I^/m;b|V? RN#Ww尸,!n|V_(.-&;L+ 7ήU?R)~.]%Tiv?}?gاVL S;mq+ۭa/ϧn2;)#Az6Vͷ`<_0մO,Γ>z-?zDwŵGULowWңvwߣ]'=џc/9}ty>hAqLۼi~mvUqU!LԩUrluצ녗W떏)ʿ߮svS+g+[ٕ{7oMK+Y;j|S7GWZw[;}~5O*kOqpWy]_*>zg'/ߵ1~^t|A?:Q/i$Zxj[#,?qW< ߵ$>?oI;]?I} ˻߿2?imux9\+fBzޤkqW\GoMFu@3_g~GA #lM_+i넔kƥ>}r8g679]s77wϙvr_<7 hύ  q4rśw5~Mtɯy謁k67Pݺpc0}89Wg~gjq֏Z?}˧CM=Z>[zamvlQްm릜/[+[|8aVkO}Wyq~~o+M~^}_$L|vlD|_zK;|&,Sn?-.mgJN-΋r|z^RٮIɿ$驕;Ta\:Iи ԏ no7}7ixN|8տzSs/!}uwX? 
eE+r|k5<ƕOm?9.-.i:vŵ_}>7s}/Ǚ<75Ivm=4{}$).8euYɟz_v:#yh~?HpZ\ucz4A?X~SOzoYIgI?~]'7S2|he"{'?-ml7_V_M}F8J[|iACr_m1'f-ukn%~Zi3F:Ϸ乵g`UO}o.qZAh*.8ki;7cz<ߐ}~ft}K΂lx MOK7z<:C|i<~i*+鷭+Dl#¾M.mFppp{h+˰} P/ZϋVq-1]h}OaB[slA#nh^mGݴC^}0ɣ}2?~B]>omtZ!)`>-!?ܟ)Q󒜥u=xGq߇yyuكnrAOCyvIO9oT7zv?&3<4_yMui賮(7s)K&|yƹhyOo(+qC2n?0zjE+~ounWhHovuF+r?~層k>Dt}ٮ=R6?h>'^6[9ֻ`g܇4t?gZS;s>]OvMz4ekMm=@^M[1O^/*i8~m=%{\[ny8 S.ow58o _O;/ Vw_S|F/6oZ\u\Qt`~:9A?zC1|iLq~cctyvjMo/5ԯğpw>jƝտ7;-S[NqiKj߫ ~WRxhvů]ycU|]Ri>ylGnNKrϦi*_NjXZ߷i|;óaO]PK'>4%MS9TzOMuU눗ڏ >Wgg%gw}nxkvv~6~?[y(Χqܮ`a\Dt_oSVNf~$Gۿ>6g?qc7OzNu巳qzO[wcM> D;n^ڍ×,?NT-};UMuƃɳ`>w]麋'שIoמmVoGo(Hm׭_6ŏ-iz>Zϩ(ݼ[:׆QI5ūv]P⯞iU7)B[{%ݼ6knkg_KOtZWƯџ~|6=7Ɖ~-?|~-fEcZG~a=5o8Ki^Ksi߶+m]maG_Z;Z{v] _Tiz]OOÕv6Ov(ǯ<"nq"ٟxʇVLzwE?O3ى诸n$=krmk#Z}Oi7Zk|fpU>ɔum-.ypUH6LǙ\J?cuu[MQ[Wlz9n󆯴Vki gں٭{y~K}{~Kt`qBnSOsi0ozn&]*xq}VoN:v]Ao髽?v\{~\aKGhLgv>2OV?LzJM?I#=>~ku3߶9~/vzqgK$woϷO=_֏,/mzV/-[}A-}1×6zŵyk ɏ-ϑ=9ygɯ#{=Bzl_Z=80$33DOsӺvk:N}') _߿=e~71%33 VZkT`'?~gGk''{ߐ-A~콾ōԶ:.[VOz xNu3=oq]Ds77C"y Ndb}kqc[>ҧx!-}+O;{;NSn[rع {cx6M>}5i>Y+Svk} ?|y|Mc1:+ɷt'_ROj}Kzoy<8:kCóyiEk3-ͳy+ה8޾Oc|twV~8b|/멝^?[W~sЍGS/yN-ЛB{vŭ#9,_|-޴q gywm-͟פ<ˮނqIyiL&WZC19Q۽Om]wkCc~8Czhm>c+/ùAooi_hqُ|iy6ن^;,5Sť;gZ5Z=}˿4njnOxnDvi]?4[7<.֞SώVZG<|\ l}t/Za-帶3~~v1c7Ǿm}k EkO\{G?Ŀ 253*7VV8Wm|nKn]?J;]Lvm\O4]=ͫvm>j:m7w;l!vx X^MOxpN3vސƗ@Yj]?}_{^$u Od^a='+ڷL)ookS{ SkO%'{jLv䏹M&P5犨qK#\Z\i>S'a͎i{A7[5f?-;?Iwφv|xi~̓h~t~:;CʺVz=ݞnk0i'x_#Džϊy/a<ۊܵ[hqe_컼mW 1=.&'}Ci럩ԯHVO)Nq=4ZYo-^+@j%3ݟYpwZIooFp|Ŋ6ϘzϧGcxb8Gp6:}]qfߩ'ZyMni^DT?ے~-vn}btM^O+'}u کpo[,/^%_ú,Ϭ9t6O&wOqś]Z{^ıbw;2~ed?OtLu[zޞъ6h.W>ϖ}=e[#~| >)ny-cNZ7!QuAOtqm RW}:Ve+I-݁̿]|ֿ#67ᅩ ~BoT/ᠭOW[Qq[ݿP Ӄ8տ1mrVY8֩Z}8ũ!^[|?)Πf?×[]:vv=gXCl0Iz4}%$ }u+[L[?-_n?_'_ǵ̏xh vqhB^I'SUb,gMgZg^UGM׭t}ްj$94O\yǷ|uҿʞ$gZv;ay 0y]^4^_0nӺSҫcǔ{V_W{Zh_j?jj׫7^v]Vk֯S{lyh;owvW\q潫~m~ڭW9vzjARNKkZ/.o֞4շr#~h|ǻ֏oN%6MN⧍c|Nݺ?Z0.OjiLN8;ʼn϶])Y}`4kj|F|6>u::s[ٚvDS yi9]W3xu~c|7*E~A70]Ϯ$4ŊƵ8BW-3ͯ=h~OY]Q!6^{NjS&^qo<7N|HhǿG8<_+״~n }8rH.l]<}y0ǝ}krq4 g}ga|-1֎->멽/ゟw4 Ϭw-L-6:1>Wo:O٭&>1@]0_;V~`v <O?)dOρLM_?d;_0|uk h]Et>o_]7ihu}g~کy Lj._)h"{_ei?K'7rԁFl 
>fy|ac{97'>[?qX_y_=|MygǿwWo~Ok/jO߇kpCy {Ӿ#~.oc,yNwf)Nbn璽['٩7UIS_oecrK<>L~mj'K70{bx壖m$mL9l[K~߮ۓny-H&9e֭-?0$\~~1zV[~+z:[<7_g+<õ'*foVu~?S]FrX]_W]9se<0}/S9,?gwIϭ39:2Jy OxZSz YLl[,xf~gY?Us{,v%:/K>~>{X+m.K:&\[\ǵͧFwNJ{+봞KFsiOqHlN,._#ש] ߩ_=J<;h}ep[O8շՍV?ٸ#^u[>'{Uz`zjϟGuƊyߡhp7Or_m ?ֿ~"[o+=s5܇k_:v|[_J>=Zy=]u>ӶN}%أHG3]/)v3ϩ}c|vrA#y? ߉>a?Z9ZݶՏCr3wVP}k|zm)N͗m} o7m4eϧzyyS'<֭ntTQ|.}ģP]vmaYa4-5~)Qw(n:$.. rvuWYpꩣԿ}2/([f;#Ӄ3ZyMJzmcEcKyv &>Myţk|nc^0 Om1џav8Nߓ[=ӯn#o~cxaz3yK=i]koyj*HlbSIm7ujg㓮>av|iz=V|~sEdG ?g甫Շ添a^OUNΊġOtid}'UCKg~j=g|Xhߵ[K~H|?= [i<>6M:p:ԊW_':4n]o96|1O8nq|~ϴحGZ|!]tM|̇gXN*ǵ1k2AGl^[m<+W7;$=;>cԇg˖u(eUm~S?|r5{7{ /Hoj?оO;'kK[[OGc?i﹌/yEgOB~s?<^..?Lm=/Cׂwmr_yxyv뛩->ɕS_}ۊwTXl+W^ [ƕ~Ggx`V.'l/yӺ(ri3"q'N3K;ў]ǵҞvkAK88r~ A?}>ݯ=7i>7>;mikq%ϭZ/k?[/Z:w->>i|֗Wڼ1nvm<.!uYEm+k7NiӺa+;~a+ezxZ;|֟m`ϻF3{=oOsWig_[Np/gt|zkjm7==Cd}a_?]4~?w֮9)^^_,oe[%>扫 ݾ'5%~Ԟ-A-on3ōݸ[C> gMTֿ浩F^ ۛ6;,N<3⿝oI#o-yN6Z=AtK]ϭWH.!_柫fZ7soӸ2|x'9>(c"bt (wuT~OߍOM[x6r\Ҽvn~s?ӣܑL>i7Nk縤y]7m\yz~GoH|g? -gqNSܵqϾZo--gKb߁mqd=Bk-?Y^o-N'9 lv}kwӸhsyOvƿū=_Y88kgozWiۮW?n㤷{: -^Nu5%9T?}4|v|Oiuiqp~iy,ou4.(m0?=7vJ9~ljD0u=$G#IgZ_>ȊO^xN8KI^Oi^[ku1#terX]k]rMy[;݇:dZ߯h[n,ozxjc56טּ6uRȅ~d3)G;m[!g }Gl^ÛhŔnߛNxa0h G7~ğn>uB[X|u?t|j\z]'5aMNR#~ Ƶu7kxLOȿۼ֏_OCl|gc7YSȳ(9b=+[scouʊ96O>m}jDx}hrv?8%_e@ٟ%W'mB[ܷz*]=}O<__~ P~[<_K|5{Wcz3' uK7ھ9mDzZûNIO9kxO]->e^![;Lā=8C}w;).VN~{ؿS{_K?6t\gϹ/gVϕy[3 ͓~3~Ri=@xo}Ol= Ǡo!X|jϳ|yk:cvۮ-"0f'VRW?o{jogO{V8oh,ox귍ӳxwޓ}ɕZvBxԞs8z{gŋVVOX;vO^ vڂ~(NT׊v7#6Yrl͇_q?Q^8[.Ʒnsw~kOs!oyI?wjY)aO Vqٿ[|Ə[ 3:"w/im~DpT}):[ !/n̿ W [ԯ LghVO)׊̏wHϧux_buc$ǒSgf~s9 'ߠze]{nE_'<y?g+%ϷjS:+%?<w) DmyY_Sh:w{~v6ټ>ݢl:5n om9ǖ?Gu ?Gu&h^ K.L%"EIֻ4IWV_7t-uփWK^Z@73q'{A'|ْ>s]A!J+[>i5YJy[1~Cyop%}K~)Ap߫]?xićO#> W;7*Wo7՛۰an߆} Q{Q{Q{Q{Q{Q{Q{Q{Q{Q{Q{}F7jo^ߨQ{}F͍ڛ77jonܨQ{sF͍ڛ7jooިQ{{F7jooݨQ{wFݍڻw7jnݨQ{F7joߨQ{FÍڇ7jn>ܨ}QpFÍڇ7jo>ި}QxF7jj2rW;}^} Ϳ}7܍}{7wc?܍;辺;辺;Owt>}tGݧ;Owt达;达;达;o边;o边;o辽;o辽;o辽;辻;辻;辿;辿;辿;~;~;~;~;~{WOwxtWOwxtWOwxtWOwxtWOwxtWOwxtWOwxtWOwxtWOwxtWOwxtWOwxtWOwxtWOwxtWOwxtWOwxtWOwxtWOwxtWOwxtWOwxtWOwxtWO׷}Zqgraph/COPYRIGHTS0000644000176200001440000000036114430573263013161 
0ustar liggesusersCOPYRIGHT STATUS ---------------- This code is Copyright (C) 2010 - 2019 Sacha Epskamp All code is subject to the GNU General Public License, Version 2. See the file COPYING for the exact conditions under which you may redistribute it. qgraph/man/0000755000176200001440000000000014430573263012316 5ustar liggesusersqgraph/man/centrality_auto.Rd0000644000176200001440000000761214521125563016016 0ustar liggesusers\encoding{UTF-8} \name{centrality_auto} \alias{centrality_auto} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Automatic centrality statistics of graphs } \description{ This function can be used on several kinds of graphs to compute several node centrality statistics and the edge-betweenness centrality. The input graph can be an adjacency matrix, a weight matrix, an edgelist (weighted or unweighted), a \code{\link{qgraph}} object or an \code{\link[igraph]{igraph}} object. } \usage{ centrality_auto(x, weighted = TRUE, signed = TRUE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ A graph. Can be a \code{qgraph} object, an \code{igraph} object, an adjacency matrix, a weight matrix and an edgelist, or a weighted edgelist. } \item{weighted}{Logical, set to \code{FALSE} to set all edge weights to 1 or -1} \item{signed}{Logical, set to \code{FALSE} to make all edge weights absolute} } \details{ The function recognizes whether the network is unweighted vs. weighted, undirected vs. directed, and connected vs. disconnected and computes a set of centrality indices that is best suited for that particular kind of network. Edge signs are always disregarded, while edge weights and directions, if present, are considered in the computation of the indices. If the network is disconnected, closeness centrality is computed only considering the largest component (notice that this is different from what function \code{\link{centrality}} does). 
If \code{x} is unweighted and directed, then the indegree, the outdegree, the node betweenness centrality, the closenes centrality, and the edge betweenness centrality are computed. If \code{x} is unweighted and undirected, then the degree, the node betweenness centrality, the closenes centrality, and the edge betweenness centralities are computed. If \code{x} is weighted and directed, then the instrength and the outstrength (same as indegree and outdegree, but considering weights), the node betweenness centrality, the closeness centrality, and edge betweenness centralities are computed If \code{x} is weighted and undirected, then the strength, the node betweenness centrality, the closenes centrality, and edge betweenness centralities are computed. Additionally, the shortest path length between each pair of nodes is also computed for all the kinds of networks. } \value{ A list containing: \item{node.centrality }{A dataframe that includes the node centrality statistics. A subset of the following centrality indices is included, depending on the input network: \code{Degree}, \code{InDegree}, \code{OutDegree}, \code{Strength}, \code{InStrength}, \code{OutStrength}, \code{Betweenness}, and \code{Closeness}.} \item{ShortestPathLengths }{A matrix containing the shortest path lengths of each pairs of nodes. These path lenghts are based on the inverse of the absolute edge weights.} \item{edge.betweenness.centrality}{The edge betweenness centrality statistic (Newman & Girvan, 2004). Edges are ordered by their decreasing centrality.} } \references{ Newman, M. E. J., Girvan, M. (2004). Finding and evaluating community structure in networks. Phisical Review E 69(026113). Costantini, G., Epskamp, S., Borsboom, D., Perugini, M., Mõttus, R., Waldorp, L., Cramer, A. O. J., State of the aRt personality research: A tutorial on network analysis of personality data in R. Manuscript submitted for publication. 
} \author{ Giulio Costantini (giulio.costantini@unimib.it), Sacha Epskamp (mail@sachaepskamp.com) } \seealso{ \code{\link{qgraph}}, \code{\link{centrality}} } \examples{ set.seed(1) adj <- matrix(sample(0:1,10^2,TRUE,prob=c(0.8,0.2)),nrow=10,ncol=10) Q <- qgraph(adj) centrality_auto(Q) # notice that a value NA is returned for the closeness centrality of nodes 3 and 9, which are not # strongly connected to the largest component of the network (3 cannot reach other nodes, 9 cannot # be reached). } \keyword{ graphs } \keyword{ centrality } qgraph/man/CentAndClusfuns.Rd0000644000176200001440000000702214430573263015645 0ustar liggesusers\name{centrality and clustering plots} \alias{centralityPlot} \alias{centralityTable} \alias{clusteringPlot} \alias{clusteringTable} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Centrality and Clustering plots and tables } \description{ These functions can be used to facilitate interpreting centrality and clustering coefficients. The plot functions use ggplot2 (Wickham, 2009). The table functions create a long format table which can easilly be plotted in ggplot2. } \usage{ centralityPlot(..., labels, scale = c("raw0", "raw", "z-scores", "relative"), include =c("Degree","Strength","OutDegree","InDegree","OutStrength", "InStrength"), theme_bw = TRUE, print = TRUE, verbose = TRUE, standardized, relative, weighted = TRUE,signed = TRUE, orderBy = "default", decreasing = FALSE) clusteringPlot(..., scale = c("raw0", "raw", "z-scores", "relative"), labels, include , signed = FALSE, theme_bw = TRUE, print = TRUE, verbose = TRUE, standardized, relative,orderBy = "default", decreasing = FALSE) centralityTable(..., labels, standardized = TRUE, relative = FALSE, weighted = TRUE, signed = TRUE) clusteringTable(..., labels, standardized = TRUE, relative = FALSE, signed = FALSE) } %- maybe also 'usage' for other objects documented here. 
\arguments{ \item{\dots}{ Objects usuable in the \code{\link{getWmat}} generic, such as qgraph objects and weights matrices. Can also be lists containing these objects. Graphs in a list will be plotted in the same panel as different lines and graphs in seperate arguments will be plotted in seperate panels. } \item{scale}{Scale of the x-axis. \code{"z-scores"} to plot standardized coefficients, \code{"raw"} to plot raw coefficients, \code{"raw0"} to plot raw coefficients while including 0 on the x-axis and \code{"relative"} to show values on a relative scale from 0 (lowest) to 1 (highest).} \item{labels}{ A vector overwriting the labels used. Can be missing. } \item{include}{ A vector of measures to include. if missing all measures available will be included. Not included by default are \code{"Closeness"}, \code{"Betweenness"}, \code{"ExpectedInfluence"}, \code{"OutExpectedInfluence"}, and \code{"InExpectedInfluence"}. Can also be \code{"all"} or \code{"All"} to include all available centrality measures. } \item{theme_bw}{ Adds the ggplot2 black and white theme to the plot } \item{print}{ If \code{TRUE}, the plot is sent to the print command and returned invisible, if \code{FALSE} the plot is returned normally. Needed to include plots in e.g., pdf files. } \item{verbose}{ Should messages be printed to the console? } \item{standardized}{ Logical, should all measures be standardized? Argument is deprecated and will be removed. } \item{relative}{ Logical, should all measures be scaled relative to the largest value? Argument is deprecated and will be removed. } \item{weighted}{Logical, set to \code{FALSE} to set all edge weights to 1 or -1} \item{signed}{Logical, set to \code{FALSE} to make all edge weights absolute} \item{orderBy}{String indicating which measure to order by. 
Can be default (alphabetical), or one of the measures plotted (e.g., \code{"Strength"})} \item{decreasing}{Logical indicating if the nodes should be ordered increasing or decreasing} } \references{ H. Wickham. ggplot2: elegant graphics for data analysis. Springer New York, 2009. } \author{ Sacha Epskamp and Jolanda Kossakowski } qgraph/man/EBICglasso.Rd0000644000176200001440000001144414430573263014524 0ustar liggesusers\name{EBICglasso} \alias{EBICglasso} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Compute Gaussian graphical model using graphical lasso based on extended BIC criterium. } \description{ This function uses the \code{\link[glasso]{glasso}} package (Friedman, Hastie and Tibshirani, 2011) to compute a sparse gaussian graphical model with the graphical lasso (Friedman, Hastie and Tibshirani, 2008). The tuning parameter is chosen using the Extended Bayesian Information criterium (EBIC). } \usage{ EBICglasso(S, n, gamma = 0.5, penalize.diagonal = FALSE, nlambda = 100, lambda.min.ratio = 0.01, returnAllResults = FALSE, checkPD = TRUE, penalizeMatrix, countDiagonal = FALSE, refit = FALSE, threshold = FALSE, verbose = TRUE, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{S}{ A covariance or correlation matrix } \item{n}{ Sample size used in computing \code{S} } \item{gamma}{ EBIC tuning parameter. 0.5 is generally a good choice. Setting to zero will cause regular BIC to be used. } \item{penalize.diagonal}{ Should the diagonal be penalized? } \item{nlambda}{ Number of lambda values to test. } \item{lambda.min.ratio}{ Ratio of lowest lambda value compared to maximal lambda } \item{returnAllResults}{ If \code{TRUE} this function does not return a network but the results of the entire glasso path. } \item{checkPD}{ If \code{TRUE}, the function will check if \code{S} is positive definite and return an error if not. 
It is not advised to use a non-positive definite matrix as input as (a) that can not be a covariance matrix and (b) glasso can hang if the input is not positive definite. } \item{penalizeMatrix}{ Optional logical matrix to indicate which elements are penalized } \item{countDiagonal}{ Should diagonal be counted in EBIC computation? Defaults to \code{FALSE}. Set to \code{TRUE} to mimic qgraph < 1.3 behavior (not recommended!). } \item{refit}{ Logical, should the optimal graph be refitted without LASSO regularization? Defaults to \code{FALSE}. } \item{threshold}{ Logical, should elements of the precision matrix that are below (log(p*(p-1)/2)) / sqrt(n) be removed (both before EBIC computation and in final model)? Set to \code{TRUE} to ensure high specificity. } \item{verbose}{ Logical, should progress output be printed to the console? } \item{\dots}{ Arguments sent to \code{\link[glasso]{glasso}} } } \details{ The glasso is run for 100 values of the tuning parameter logarithmically spaced between the maximal value of the tuning parameter at which all edges are zero, lamba_max, and lambda_max/100. For each of these graphs the EBIC is computed and the graph with the best EBIC is selected. The partial correlation matrix is computed using \code{\link{wi2net}} and returned. When \code{threshold = TRUE}, elements of the inverse variance-covariance matrix are first thresholded using the theoretical bound (Jankova and van de Geer, 2018). } \value{ A partial correlation matrix } \references{ Friedman, J., Hastie, T., & Tibshirani, R. (2008). Sparse inverse covariance estimation with the graphical lasso. Biostatistics, 9(3), 432-441. Chicago Jerome Friedman, Trevor Hastie and Rob Tibshirani (2011). glasso: Graphical lasso-estimation of Gaussian graphical models. R package version 1.7. http://CRAN.R-project.org/package=glasso Foygel, R., & Drton, M. (2010, November). Extended Bayesian Information Criteria for Gaussian Graphical Models. In NIPS (pp. 604-612). Chicago Revelle, W. 
(2014) psych: Procedures for Personality and Psychological Research, Northwestern University, Evanston, Illinois, USA, http://CRAN.R-project.org/package=psych Version = 1.4.4. Bates, D., and Maechler, M. (2014). Matrix: Sparse and Dense Matrix Classes and Methods. R package version 1.1-3. http://CRAN.R-project.org/package=Matrix Jankova, J., and van de Geer, S. (2018) Inference for high-dimensional graphical models. In: Handbook of graphical models (editors: Drton, M., Maathuis, M., Lauritzen, S., and Wainwright, M.). CRC Press: Boca Raton, Florida, USA. } \author{ Sacha Epskamp } \examples{ \dontrun{ ### Using bfi dataset from psych ### library("psych") data(bfi) # Compute correlations: CorMat <- cor_auto(bfi[,1:25]) # Compute graph with tuning = 0 (BIC): BICgraph <- EBICglasso(CorMat, nrow(bfi), 0, threshold = TRUE) # Compute graph with tuning = 0.5 (EBIC) EBICgraph <- EBICglasso(CorMat, nrow(bfi), 0.5, threshold = TRUE) # Plot both: layout(t(1:2)) BICgraph <- qgraph(BICgraph, layout = "spring", title = "BIC", details = TRUE) EBICgraph <- qgraph(EBICgraph, layout = "spring", title = "EBIC") # Compare centrality and clustering: layout(1) centralityPlot(list(BIC = BICgraph, EBIC = EBICgraph)) clusteringPlot(list(BIC = BICgraph, EBIC = EBICgraph)) } } qgraph/man/VARglm.Rd0000644000176200001440000000167214430573263013743 0ustar liggesusers\name{VARglm} \alias{VARglm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Computes a vector autoregressive lag-1 model using GLM } \description{ This function computes a VAR model using glm. } \usage{ VARglm(x, family, vars, adjacency, icfun = BIC, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ A data frame } \item{family}{ The family to be used. Defaults to \code{gaussian} if data is continuous or \code{binomial} if data is binary } \item{vars}{ Vector of variables to predict. If missing all variables are predicted. } \item{adjacency}{ Adjacency matrix. 
If missing full network is estimated } \item{icfun}{ Information criterium function to be included in the output } \item{\dots}{ Arguments used in the \code{icfun} } } \value{ A list containing: \item{graph}{The estimated graph} \item{IC}{The information criterium} } \author{ Sacha Epskamp } \examples{ \dontrun{ ### Examples from lavCor (lavaan): ### library("lavaan") # Holzinger and Swineford (1939) example HS9 <- HolzingerSwineford1939[,c("x1","x2","x3","x4","x5", "x6","x7","x8","x9")] # Pearson correlations cor_auto(HS9) # ordinal version, with three categories HS9ord <- as.data.frame( lapply(HS9, cut, 3, labels=FALSE) ) # polychoric correlations, two-stage estimation cor_auto(HS9ord) } } qgraph/man/qgraph.animate.Rd0000644000176200001440000001567314430573263015520 0ustar liggesusers\name{qgraph.animate} \alias{qgraph.animate} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Animate a growing network } \description{ This function is meant to facilitate the creation of animations based on growing networks. Networks are created based on the Fruchterman Reingold algorithm, which is constraint by limiting the maximum displacement of nodes that are already in the graph. } \usage{ qgraph.animate(input, ind = NULL, ..., constraint = 10, growth = "order", titles = NULL, sleep = 0, smooth = TRUE, plotGraphs = TRUE, progress = TRUE, initLayout) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{input}{ A weights matrix of the graph or a list of weigths matrices with different weights of the same graph (see details). See \code{\link{qgraph}}. Edgelists are currently not supported. } \item{ind}{ An object that specifies which nodes ar included or excluded. See details. } \item{\dots}{ Additional arguments sent to \code{\link{qgraph}} } \item{constraint}{ The constraint factor of included nodes. See details. Defaults to 10 for an soft-constrained animation. Set to Inf for a hard-constrained animation. 
} \item{growth}{ The way nodes are added by default. Set to "order" to include nodes in the order they appear in the weigths matrix and to "degree" to include nodes based on their degree (high degree first) } \item{titles}{ Optional vector with a title for each plot } \item{sleep}{ Optional value sent to Sys.sleep() for showing the animation in R } \item{smooth}{ Logical. If set to \code{TRUE} smoothing via \link[stats]{loess} is performed on the layout of all frames. } \item{plotGraphs}{ Logical. If set to \code{FALSE} graphs are not plotted. } \item{progress}{ Logical. If set to \code{TRUE} progress bars are included. } \item{initLayout}{An optional n by 2 matrix containing the initial placement of nodes in the animation.} } \details{ Let n be the number of nodes in total in the graph. This function is designed to facilitate the production of animations by constraining the Fruchterman Reingold algorithm. Several frames are plotted of (a subset of) the same graph. If a node was already in the graph its maximum displacement per iteration of Fruchterman Reingold is equal to the number of nodes times the inverse of the constraint argument (so by default n/10). The higher this constraint value the stricter nodes stay in the same place between plots. How many and which plots are made are defined by the 'input' and 'ind' arguments. There are two ways to specify the 'input' argument, either by speficying one weigths matrix or by specifying a list of weights matrices. In the sections below is explained what both of these methods do and how they are used. This function, since it can be seen as an expression that makes several plots, works well in combination with the animation package for saving the animation to a wide variety of filetypes. } \section{Single weigths matrix}{ If 'input' is a single weigths matrix then in each frame a subset of the same graph is plotted. This is especially usefull for animating the growth of a network. 
Which nodes are in each frame is determined by the 'ind' argument. If 'int' is not specified an animation is created in which in each frame a single node is added. This node is either in order of apearance in the weigths matrix or by its degree, which is determined with the 'growth' argument. If 'ind' is a logical vector of length n than the first frame will contain the nodes specified with this vector and all other frames will grow in the same way as explained above (each step one node is added). If 'ind' is a numeric vector of length n which contains all integers between 1 and n ( a single entry per node) then the first frame starts with only the node specified in the first element of the vector and in frame i the ith element is added (each step one node is added). If 'ind' is a list with numeric vectors as elements containing integers between 1 and n then in frame i the nodes from the ith element of the list will be added. Node numbers that occur multiple times in the list are ignored (they are already added the first time). Finally, if 'ind' is a logical matrix with n columns and an arbitrary amount of rows, then in frame i only the nodes that are TRUE in row i are included. This is the only way to specify removal of nodes. } \section{List of weigths matrices}{ The 'input' argument can also be given a list of weigths matrices if all these matrices have the same dimension (i.e.\ only the weights differ). If this is done than in frame i the ith weigths matrix is used. This is especially usefull for animating the change in a graph. In this case, the 'ind' argument behaves differently. If this argument is not specified then in each frame all nodes are included. If 'ind' is a logical vector of length n then only one plot is made with the nodes specified with that vector, and only if the length of 'input' is one. Other methods woth in the same way as above. 
However, if the 'ind' argument indicates a different number of frames than the 'input' argument the function will stop and give an error. } \value{ Invisibly returns a list of all graphs. } \author{ Sacha Epskamp (mail@sachaepskamp.com) } \references{ Sacha Epskamp, Angelique O. J. Cramer, Lourens J. Waldorp, Verena D. Schmittmann, Denny Borsboom (2012). qgraph: Network Visualizations of Relationships in Psychometric Data. Journal of Statistical Software, 48(4), 1-18. URL http://www.jstatsoft.org/v48/i04/. } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link{qgraph}} } \examples{ \dontrun{ ## For these examples, first generate a scale free network using preferential attachment: # Number of nodes: n <- 100 # Empty vector with Degrees: Degs <- rep(0, n) # Empty Edgelist: E <- matrix(NA, n - 1, 2) # Add and connect nodes 1 and 2: E[1, ] <- 1:2 Degs[1:2] <- 1 # For each node, add it with probability proportional to degree: for (i in 2:(n - 1)) { E[i, 2] <- i + 1 con <- sample(1:i, 1, prob = Degs[1:i]/sum(Degs[1:i]),i) Degs[c(con,i+1)] <- Degs[c(con,i+1)] + 1 E[i, 1] <- con } # Because this is an edgelist we need a function to convert this to an adjacency matrix: E2adj <- function(E,n) { adj <- matrix(0,n,n) for (i in 1:nrow(E)) { adj[E[i,1],E[i,2]] <- 1 } adj <- adj + t(adj) return(adj) } ### EXAMPLE 1: Animation of construction algorithm: ### adjs <- lapply(1:nrow(E),function(i) E2adj(E[1:i,,drop=FALSE],n)) qgraph.animate(adjs,color="black",labels=FALSE,sleep=0.1, smooth = FALSE) rm(adjs) ### EXAMPLE 2: Add nodes by final degree: ### adj <- E2adj(E,n) qgraph.animate(E2adj(E,n),color="black",labels=FALSE,constraint=100,sleep=0.1) ### EXAMPLE 3: Changing edge weights: ### adjW <- adj*rnorm(n^2) adjW <- (adjW + t(adjW))/2 adjs <- list(adjW) for (i in 2:100) { adjW <- adj*rnorm(n^2) adjW <- (adjW + t(adjW))/2 adjs[[i]] <- adjs[[i-1]] + adjW } qgraph.animate(adjs,color="black",labels=FALSE,constraint=100,sleep=0.1) } } 
qgraph/man/wi2net.Rd0000644000176200001440000000176614430573263014027 0ustar liggesusers\name{wi2net} \alias{wi2net} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Converts precision matrix to partial correlation matrix } \description{ A small function that converts a precision matrix (inverse of covariance matrix) to a partial correlatin matrix. This can be done by standardizing the precision matrix and changing the sign of the offdiagonal entries. Many methods exist for obtaining a precision matrix (Such as the glasso package; Friedman, Hastie and Tibshirani, 2011) but the partial correlation matrix is easier interpretable and better usuable in qgraph. } \usage{ wi2net(x) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ A precision matrix } } \value{ A partial correlation matrix } \references{ Jerome Friedman, Trevor Hastie and Rob Tibshirani (2011). glasso: Graphical lasso-estimation of Gaussian graphical models. R package version 1.7. http://CRAN.R-project.org/package=glasso } \author{ Sacha Epskamp }qgraph/man/ggmFit.Rd0000644000176200001440000000373314430573263014030 0ustar liggesusers\name{ggmFit} \alias{ggmFit} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Obtain fit measures of a Gaussian graphical model } \description{ Obtain fit measures of a given Gaussian graphical model (GGM). Input can be either a partial correlation matrix, inverse covariance matrix or \code{qgraph} object. } \usage{ ggmFit(pcor, covMat, sampleSize, refit = TRUE, ebicTuning = 0.5, nPar, invSigma, tol = sqrt(.Machine$double.eps), verbose = TRUE, countDiagonalPars = TRUE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{pcor}{ Implied partial correlation matrix or \code{qgraph} object. 
} \item{covMat}{ Observed variance-covariance matrix } \item{sampleSize}{ The sample size used in computing the variance-covariance matrix } \item{refit}{ Logical, should the network be refitted using \code{\link[glasso]{glasso}}? } \item{ebicTuning}{ EBIC tuning parameter. } \item{invSigma}{ Implied inverse variance-covariance matrix. If this object is assigned \code{pcor} is not used. } \item{nPar}{ Number of parameters, if not specified this is retrieved from the number of zeroes in the inverse variance--covariance matrix. Can be used to compute fit measures of any statistical model (e.g., SEM). } \item{tol}{ Tolerance for setting an edge to zero. } \item{verbose}{ Logical, should progress reports be printed to the console? } \item{countDiagonalPars}{ Logical, should the diagonal of the precision matrix be counted as parameters? } } \author{ Sacha Epskamp } \examples{ library("psych") # Load BFI data: data(bfi) bfi <- bfi[,1:25] # Covariance matrix: CovMat <- cov(bfi[,1:25], use="pairwise.complete.obs") # Compute network: EBICgraph <- qgraph(CovMat, graph = "glasso", sampleSize = nrow(bfi), tuning = 0.5, layout = "spring", title = "BIC", details = TRUE) # Obtain fit measures: fitNetwork <- ggmFit(EBICgraph, CovMat, nrow(bfi)) fitNetwork } qgraph/man/qgraph_loadings.Rd0000644000176200001440000000503514430573263015752 0ustar liggesusers\name{qgraph.loadings} \alias{qgraph.loadings} \title{qgraph.loadings} \description{ This function is a wrapper function for \code{\link{qgraph}} designed to visualize factor loadings. } \usage{ qgraph.loadings( fact, ...) } %- maybe also 'usage' for other objects documented here. 
\arguments{ \item{fact}{A matrix containing factor loadings (items per row, factors per column) or an "loadings" object} \item{...}{Additional optional arguments passed to \code{\link{qgraph}} and special arguments used in this function (described below).} } \section{Additional optional arguments}{ \describe{ \item{layout}{If "default" a standard layout for factor models will be made. If this is "circle" the default layout is circled (factors in the centre, items at the edge). No other layouts are currently supported.} \item{vsize}{A vector where the first value indicates the size of manifest variables and the second value indicates the size of latent variables.} \item{model}{"reflective" to have arrows go to manifest variables, "formative" to have arrows go to latent variables or "none" (default) for no arrows} \item{crossloadings}{Logical, if TRUE then for each manifest variable the strongest loading is omitted (default to FALSE).} \item{groups}{An optional list containing the measurement model, see \code{\link{qgraph}}} \item{Fname}{When there is only one factor, this is it's name. If there are more factors, the names in the groups list are used only if the factors can be identified.} \item{resid}{Values for the residuals} \item{residSize}{Size of the residuals, defaults to 0.1} \item{factorCors}{Correlation matrix of the factors} }} \references{ Sacha Epskamp, Angelique O. J. Cramer, Lourens J. Waldorp, Verena D. Schmittmann, Denny Borsboom (2012). qgraph: Network Visualizations of Relationships in Psychometric Data. Journal of Statistical Software, 48(4), 1-18. URL http://www.jstatsoft.org/v48/i04/. 
} \author{ Sacha Epskamp (mail@sachaepskamp.com) } \seealso{ \code{\link{qgraph}} } \examples{ \dontrun{ # Load big5 dataset: data(big5) data(big5groups) big5efa <- factanal(big5,factors=5,rotation="promax",scores="regression") big5loadings <- loadings(big5efa) qgraph.loadings(big5loadings,groups=big5groups,minimum=0.2, cut=0.4,vsize=c(1.5,15),borders=FALSE,vTrans=200, model = "reflective", resid = big5efa$uniquenesses) # Tree layout: qgraph.loadings(big5loadings,groups=big5groups,minimum=0.2, cut=0.4,vsize=c(1.5,15),borders=FALSE,vTrans=200, layout="tree",width=20,model = "reflective", resid = big5efa$uniquenesses) } } qgraph/man/ggmModSelect.Rd0000644000176200001440000000763714430573263015174 0ustar liggesusers\name{ggmModSelect} \alias{ggmModSelect} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Unregularized GGM model search } \description{ This function will search for an optimal Gaussian graphical model by minimizing the (extended) Bayesian information criterion of unregularized GGM models. Selecting unregularized GGMs according to EBIC has been shown to converge to the true model (Foygel & Drton, 2010). The algorithm starts with refitting models from the glassopath, and subsequently adds and removes edges until EBIC can no longer be improved (see details). Note, contrary to \code{\link{EBICglasso}}, the default for the EBIC hyperparameter gamma is set to 0, indicating BIC model selection. } \usage{ ggmModSelect(S, n, gamma = 0, start = c("glasso", "empty", "full"), stepwise = TRUE, considerPerStep = c("subset", "all"), verbose = TRUE, nCores = 1, checkPD = TRUE, criterion = 'ebic', ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{S}{ A covariance or correlation matrix } \item{n}{ Sample size used in computing \code{S} } \item{gamma}{ EBIC tuning parameter. 0 (default) leads to BIC model selection. 0.25 or 0.5 are typical choices for more conservative model selection. 
} \item{start}{ What model should stepwise search start from? \code{"glasso"} to first run glasso to obtain the best fitting model, \code{"empty"} for an empty network, \code{"full"} for a saturated network, or a matrix encoding the starting network. } \item{stepwise}{ Logical indicating if stepwise model search should be used. } \item{considerPerStep}{ \code{"subet"} to only consider changing edges that previously indicated improvement in EBIC, unless changing no edge indicated an improvement to EBIC, in which case all edges are again considered (see details). \code{"all"} will consider changing all edges at every step. } \item{verbose}{ Logical, should progress reports be printed to the console? } \item{nCores}{ The number of cores to use in testing models. } \item{checkPD}{ If \code{TRUE}, the function will check if \code{S} is positive definite and return an error if not. It is not advised to use a non-positive definite matrix as input as (a) that can not be a covariance matrix and (b) glasso can hang if the input is not positive definite. } \item{criterion}{ String indicating an output of \code{\link{ggmFit}} to be minimized } \item{\dots}{ Arguments sent to \code{\link[glasso]{glasso}} } } \details{ The full algorithm is as follows: 1. Run glasso to obtain 100 models 2. Refit all models without regularization 3. Choose the best according to EBIC 4. Test all possible models in which one edge is changed (added or removed) 5. If no edge can be added or changed to improve EBIC, stop here 6. Change the edge that best improved EBIC, now test all other edges that would have also lead to an increase in EBIC again 7. If no edge can be added or changed to improve EBIC, go to 4, else, go to 6. When \code{stepwise = FALSE}, steps 4 to 7 are ignored. When \code{considerPerStep = "all"}, all edges are considered at every step. Note that this algorithm is very slow in higher dimensions (e.g., above 30-40 nodes). 
Note that EBIC computation is slightly different from that in \code{\link{EBICglasso}} and instead follows the implementation in lavaan.
\title{ Highlight shortest pathways in a network } \description{ This function highlights the shortest paths between nodes in a network made by \code{\link{qgraph}}. Based on Isvoranu et al. (2016). } \usage{ pathways(graph, from, to, fading = 0.25, lty = 3) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{graph}{ Output from \code{\link{qgraph}}. } \item{from}{ A vector indicating the first set of nodes between which pathways should be highlighted. Can be numeric or characters corresponding to node labels. } \item{to}{ A vector indicating the second set of nodes between which pathways should be highlighted. Can be numeric or characters corresponding to node labels. } \item{fading}{ The fading of the edges that are not part of shortest paths between 'from' and 'to'. } \item{lty}{ The line type of the edges that are not part of shortest paths between 'from' and 'to'. } } \references{ Isvoranu, A. M., van Borkulo, C. D., Boyette, L. L., Wigman, J. T., Vinkers, C. H., Borsboom, D., & Group Investigators. (2016). A Network Approach to Psychosis: Pathways Between Childhood Trauma and Psychotic Symptoms. Schizophrenia bulletin, sbw055. } \author{ Sacha Epskamp and Adela M. Isvoranu } \seealso{ \code{\link{qgraph}} } \examples{ library("qgraph") library("psych") data(bfi) # Compute correlations: CorMat <- cor_auto(bfi[,1:25]) # Compute graph with tuning = 0 (BIC): BICgraph <- qgraph(CorMat, graph = "glasso", sampleSize = nrow(bfi), tuning = 0, layout = "spring", title = "BIC", details = TRUE) # All paths between Agreeableness and Neuroticism: pathways(BICgraph, from = c("A1","A2","A3","A4","A5"), to = c("N1","N2","N3","N4","N5")) } qgraph/man/qgraphMixed.Rd0000644000176200001440000000244214430573263015060 0ustar liggesusers\name{qgraphMixed} \alias{qgraphMixed} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Plots a mixed graph with both directed and undirected edges. 
} \description{ This function can be used to plot a network in which each node is connected by at most 3 edges; one undirected edge and two directed edges. } \usage{ qgraphMixed(undirected, directed, parallel = TRUE, parallelAngle = pi/6, diagUndirected = FALSE, diagDirected = TRUE, ltyUndirected = 1, ltyDirected = 1, curve = 1, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{undirected}{ The undirected network weights matrix. } \item{directed}{ The directed network weights matrix. } \item{parallel}{ Logical indicating if edges should be plotted parallel or curved. } \item{parallelAngle}{ See \code{\link{qgraph}} } \item{diagUndirected}{ Logical indicating if the diagonal of the undirected edges should be included. } \item{diagDirected}{ Logical indicating if the diagonal of the directed edges should be included. } \item{ltyUndirected}{ lty of undirected edges } \item{ltyDirected}{ lty of directed edges } \item{curve}{ Curvature of directed edges } \item{\dots}{ Arguments sent to \code{\link{qgraph}} } } \author{ Sacha Epskamp } qgraph/man/big5.Rd0000644000176200001440000000165514521125365013437 0ustar liggesusers\encoding{UTF-8} \name{big5} \alias{big5} \docType{data} \title{ Big 5 dataset } \description{ This is a dataset of the Big 5 personality traits that will be used in talks and the paper. It is a measurement of the Dutch translation of the NEO-PI-R on 500 first year psychology students (Dolan, Oort, Stoel, Wicherts, 2009). } \usage{data(big5)} \references{ Hoekstra, H. A., Ormel, J., & De Fruyt, F. (2003). NEO-PI-R/NEO-FFI: Big 5 persoonlijkheidsvragenlijst. Handleiding [Manual of the Dutch version of the NEO-PI-R/NEO-FFI]. Lisse, The Netherlands: Swets and Zeitlinger. Dolan, C. V., Oort, F. J., Stoel, R. D., & Wicherts, J. M. (2009). Testing measurement invariance in the target rotates multigroup exploratory factor model. Structural Equation Modeling, 16, 295--314. 
} \format{ The format is: num [1:500, 1:240] 2 3 4 4 5 2 2 1 4 2 ... - attr(*, "dimnames")=List of 2 ..$ : NULL ..$ : chr [1:240] "N1" "E2" "O3" "A4" ... } qgraph/man/centrality.Rd0000644000176200001440000000770014521125441014757 0ustar liggesusers\encoding{UTF-8} \name{centrality} \alias{centrality} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Centrality statistics of graphs } \description{ This function can be used on the output of \code{\link{qgraph}} to compute the node centrality statistics for weighted graphs proposed by Opsahl, Agneessens and Skvoretz (2010). } \usage{ centrality(graph, alpha = 1, posfun = abs, pkg, all.shortest.paths = FALSE, weighted = TRUE, signed = TRUE, R2 = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{graph}{ A "qgraph" object obtained from \code{\link{qgraph}}} \item{alpha}{ The tuning parameter. Defaults to 1. } \item{posfun}{ A function that converts positive and negative values to only positive. Defaults to the absolute value. } \item{pkg}{ Package to use. Either \code{"qgraph"} or \code{"igraph"}. Defaults to \code{"qgraph"} for directed networks and \code{"igraph"} for undirected networks. } \item{all.shortest.paths}{ Logical if all shortest paths should be returned. Defaults to \code{FALSE}. Setting this to true can greatly increase computing time if \code{pkg = "igraph"}. } \item{weighted}{Logical, set to \code{FALSE} to set all edge weights to 1 or -1} \item{signed}{Logical, set to \code{FALSE} to make all edge weights absolute} \item{R2}{Logical, should R-squared (predictability) be computed for GGM structures?} } \details{ This function computes and returns the in and out degrees, closeness and betweenness as well as the shortest path lengths and shortest paths between all pairs of nodes in the graph. For more information on these statistics, see Opsahl, Agneessens and Skvoretz (2010). Self-loops are ignored in computing centrality indices. 
These statistics are only defined for positive edge weights, and thus negative edge weights need to be transformed into positive edge weights. By default, this is done by taking the absolute value. The algorithm used for computing the shortest paths is the well known "Dijkstra’s algorithm" (Dijkstra, 1959). The algorithm has been implemented in R, which can make this function take several minutes to run for large graphs (over 100 nodes). A future version of qgraph will include a compiled version to greatly speed up this function. } \value{ A list containing: \item{OutDegree}{A vector containing the outward degree of each node.} \item{InDegree}{A vector containing the inward degree of each node.} \item{Closeness}{A vector containing the closeness of each node.} \item{Betweenness}{A vector containing the betweenness of each node} %\item{rspbc}{Randomized Shortest Paths Betweenness Centrality as implemented in \code{\link[NetworkToolbox]}{rspbc} %\item{hybrid}{Hybrid centrality as implemented in \code{\link[NetworkToolbox]}{hybrid} \item{InExpectedInfluence}{Expected incoming influence - sum of incomming edge weights connected to a node (not made absolute)} \item{OutExpectedInfluence}{Expected outgoing influence - sum of outgoing edge weights connected to a node (not made absolute)} \item{ShortestPathLengths }{A matrix containing the shortest path lengths of each pairs of nodes. These path lenghts are based on the inverse of the absolute edge weights raised to the power alpha.} \item{ShortestPaths }{A matrix of lists containing all shortest path lengths between all pairs of nodes. Use double square brackets to index. E.g., if the list is called 'res', res$ShortestPaths[[i,j]] gives a list containing all shortest paths between node i and j.} } \references{ Opsahl, T., Agneessens, F., Skvoretz, J. (2010). Node centrality in weighted networks: generalizing degree and shortest paths. Soc Netw. 32:245–251. Dijkstra, E.W. (1959). 
A note on two problems in connexion with graphs. Numerische Mathematik 1, 269–271. } \author{ Sacha Epskamp (mail@sachaepskamp.com) } \seealso{ \code{\link{qgraph}} } \examples{ set.seed(1) adj <- matrix(sample(0:1,10^2,TRUE,prob=c(0.8,0.2)),nrow=10,ncol=10) Q <- qgraph(adj) centrality(Q) } \keyword{ graphs } qgraph/man/smallworldness.Rd0000644000176200001440000001061514521125566015661 0ustar liggesusers\name{smallworldness} \alias{smallworldness} \encoding{UTF-8} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Compute the small-worldness index. } \description{ Compute the small-worldness index (Humphries & Gurney, 2008) relying on the global transitity of the network (Newman, 2003) and on its average shortest path length. } \usage{ smallworldness(x, B = 1000, up = 0.995, lo = 0.005) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ A graph. Can be a \code{qgraph} object object, an \code{igraph} object, an adjacency matrix, a weight matrix and an edgelist, or a weighted edgelist. } \item{B}{ The number of random networks. } \item{up}{ The upper quantile. } \item{lo}{ the lower quantile. } } \details{ The function computes the transitivity of the target network and the average shortest path length. Then it computes the average of the same indices on \code{B} random networks. The small-worldness index is then computed as the transitivity (normalized by the random transitivity) over the average shortest path length (normalized by the random average shortest path length). The \code{lo} and \code{up} quantiles of the distribution of the random networks are also returned for both the transitivity and the average shortest path length. A network can be said "smallworld" if its \code{smallworldness} is higher than one (a stricter rule is \code{smallworldness}>=3; Humphries & Gurney, 2008). 
To consider a network as "smallworld", it is also suggested to inspect that the network has a transitivity substantially higher than comparable random networks and that its average shortest path length is similar or higher (but not many times higher) than that computed on random networks. Edge weights, signs and directions are ignored in the computation of the indices. } \value{ \item{smallworldness}{the "small-worldness" index proposed by Humphries & Gurney (2008)} \item{trans_target}{the global transitivity of the target network (Newman, 2003)} \item{averagelength_target}{the average shortest path length in the target network} \item{trans_rnd_M}{the average transitivity in the \code{B} random networks} \item{trans_rnd_lo}{the \code{lo} quantile of the transitivity in the \code{B} random networks} \item{trans_rnd_up}{the \code{up} quantile of the transitivity in the \code{B} random networks} \item{averagelength_rnd_M}{the average shortest path length in the \code{B} random networks} \item{averagelength_rnd_lo}{the \code{lo} quantile of the shortest path length in the \code{B} random networks} \item{averagelength_rnd_up}{the \code{up} quantile of the shortest path length in the \code{B} random networks} } \references{ Costantini, G., Epskamp, S., Borsboom, D., Perugini, M., Mottus, R., Waldorp, L., Cramer, A. O. J., State of the aRt personality research: A tutorial on network analysis of personality data in R. Manuscript submitted for publication. Humphries, M. D., & Gurney, K. (2008). Network "small-world-ness": a quantitative method for determining canonical network equivalence. PLoS One, 3(4), e0002051. Newman, M. E. J. (2003). The structure and function of complex networks*. SIAM Review, 45(3), 167–256. 
} \author{ Giulio Costantini (giulio.costantini@unimib.it), Sacha Epskamp (mail@sachaepskamp.com) } \note{ If a directed network is given as input, an edge between every two nodes i and j is considered present if there is an arrow either from i to j or from j to i or both. } %% ~Make other sections like Warning with \section{Warning }{....} ~ \examples{ set.seed(1) # a regular lattice. Even if the small-worldness is higher than three, the average path length is # much higher than that of random networks. regnet<-igraph::watts.strogatz.game(dim=1, size=1000, nei=10, p=0, loops=FALSE, multiple=FALSE) smallworldness(regnet, B=10) \dontrun{ # a small-world network: the transitivity is much higher than random, the average path length is # close to that of random networks swnet<-igraph::watts.strogatz.game(dim=1, size=1000, nei=10, p=.1, loops=FALSE, multiple=FALSE) smallworldness(swnet, B=10) # a pseudorandom network: both the average path length and the transitivity are similar to random # networks. rndnet<-igraph::watts.strogatz.game(dim=1, size=1000, nei=10, p=1, loops=FALSE, multiple=FALSE) smallworldness(rndnet, B=10) } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{smallworld} \keyword{transitivity}qgraph/man/print.qgraph.Rd0000644000176200001440000000072714430573263015230 0ustar liggesusers\name{print.qgraph} \alias{print.qgraph} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Print edgelist } \description{ This function prints the edgelist of a "qgraph" object } \usage{ \method{print}{qgraph}(x, ...) } %- maybe also 'usage' for other objects documented here. 
\arguments{ \item{x}{ A "qgraph" object } \item{...}{ These arguments are not used } } \author{ Sacha Epskamp (mail@sachaepskamp.com) } \seealso{ \code{\link{qgraph}} } qgraph/man/makeBW.Rd0000644000176200001440000000347014430573263013757 0ustar liggesusers\name{makeBW} \alias{makeBW} \title{ A qgraph plot can be understood in black and white } \description{ Plot a qgraph network that can be understood also in black and white or grayscale. Positive lines are full and negative ones are dashed. Nodes colors are associated to unique motifs. Up to 12 different motifs are supported at the moment. } \usage{ makeBW(x, colorlist = NA, plot = TRUE) } \arguments{ \item{x}{ A qgraph object } \item{colorlist}{ Optional: a vector of colors. See details. } \item{plot}{ logical: if FALSE, the network is not plotted. } } \details{ If no colorlist is specified, each color is randomly associated to one of the motifs. Specifying colorlist serves for (a) assigning colors to a specific motif, because the first color in the vector will always be associated to the first motif (this can be used e.g., for being consistent across plots), or (b) for associating motifs only to some of the colors, but not to others, since only in colors in the colorlist are associated to motifs if a colorlist is specified. } \value{ Silently returns a qgraph object "x" in which two new elements are present, "$graphAttributes$Nodes$density" and "$graphAttributes$Nodes$angles", which affect how the nodes are plotted. Can also be further customized and then re-plotted using plot(x). 
} \author{ Giulio Costantini } \examples{ set.seed(1) x <- cor(matrix(rnorm(25), nrow = 5)) colors <- c("red", "red", "blue", "blue", "white") # colored qgraph plot qg <- qgraph(x, colors = colors) # randomly assign motifs to colors (notice that white nodes stay white) makeBW(qg) # associate a motif only to one of the colors makeBW(qg, colorlist = c("blue")) # define an order, which allows to choose motifs makeBW(qg, colorlist = c("blue", "red")) makeBW(qg, colorlist = c("red", "blue")) } \keyword{black} \keyword{white} qgraph/man/averageLayout.Rd0000644000176200001440000000136714430573263015424 0ustar liggesusers\name{averageLayout} \alias{averageLayout} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Computes an average layout over several graphs } \description{ This function can be used to compute a joint layout over several graphs. } \usage{ averageLayout(..., layout = "spring", repulsion = 1, layout.par) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{\dots}{ Multiple graph objects such as \code{qgraph} objects or weights matrices. } \item{layout}{ Same as in \code{\link{qgraph}} } \item{repulsion}{ The repulsion parameter as used in \code{\link{qgraph}}. } \item{layout.par}{ Same as in \code{\link{qgraph}} } } \value{ A layout matrix } \author{ Sacha Epskamp } qgraph/man/flow.Rd0000644000176200001440000000340314430573263013554 0ustar liggesusers\name{flow} \alias{flow} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Draws network as a flow diagram showing how one node is connected to all other nodes } \description{ This function will draw one node of interest on the left, then subsequently draw all other nodes in vertical levels to the right, in the order of direct (unweighted) connectedness to the node of interest. Layout is based on the \code{layout_as_tree} function from the igraph package. This allows one to see how one node connects to other nodes in the network. 
} \usage{ flow(object, from, horizontal = TRUE, equalize = TRUE, minCurve = 1, maxCurve = 4, unfadeFirst = FALSE, fade = TRUE, labels, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ A qgraph object } \item{from}{ Integer or character indicating the (label of the) node of interest. } \item{horizontal}{ Logical, should the flow diagram be plotted horizontally or vertically } \item{equalize}{ Logical, should the placement of nodes be equalized per level. } \item{minCurve}{ Minimum curve of edges on the same level } \item{maxCurve}{ Maximum curve of edges on the same level } \item{unfadeFirst}{ Logical, should edges between the node of interest be unfaded? } \item{fade}{ 'fade' argument as used in \code{\link{qgraph}} } \item{labels}{ 'labels' argument as used in \code{\link{qgraph}} } \item{\dots}{ Arguments sent to qgraph } } \author{ Sacha Epskamp } \examples{ \dontrun{ # Load data: library("psych") data(bfi) # Compute polychoric correlations: corMat <- cor_auto(bfi[,1:25]) # Glasso network: g2 <- qgraph(corMat, cut = 0, graph = "glasso", sampleSize = nrow(bfi), threshold = TRUE) # Flow from A2: flow(g2, "A2", horizontal = TRUE) } }qgraph/man/big5groups.Rd0000644000176200001440000000213414521125343014664 0ustar liggesusers\encoding{UTF-8} \name{big5groups} \alias{big5groups} \docType{data} \title{ Big 5 groups list } \description{ This is the groups list of the big 5 data. It is a measurement of the Dutch translation of the NEO-PI-R on 500 first year psychology students (Dolan, Oort, Stoel, Wicherts, 2009). } \usage{data(big5groups)} \references{ Hoekstra, H. A., Ormel, J., & De Fruyt, F. (2003). NEO-PI-R/NEO-FFI: Big 5 persoonlijkheidsvragenlijst. Handleiding [Manual of the Dutch version of the NEO-PI-R/NEO-FFI]. Lisse, The Netherlands: Swets and Zeitlinger. Dolan, C. V., Oort, F. J., Stoel, R. D., & Wicherts, J. M. (2009). Testing measurement invariance in the target rotates multigroup exploratory factor model. 
Structural Equation Modeling, 16, 295--314. } \format{ The format is: List of 5 $ Neuroticism : num [1:48] 1 6 11 16 21 26 31 36 41 46 ... $ Extraversion : num [1:48] 2 7 12 17 22 27 32 37 42 47 ... $ Openness : num [1:48] 3 8 13 18 23 28 33 38 43 48 ... $ Agreeableness : num [1:48] 4 9 14 19 24 29 34 39 44 49 ... $ Conscientiousness: num [1:48] 5 10 15 20 25 30 35 40 45 50 ... } qgraph/man/summary.qgraph.Rd0000644000176200001440000000077714430573263015576 0ustar liggesusers\name{summary.qgraph} \alias{summary.qgraph} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Summary method for "qgraph" } \description{ This function creates a brief summary based on a "qgraph" object. } \usage{ \method{summary}{qgraph}(object, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ A "qgraph" object } \item{...}{ These arguments are not used } } \author{ Sacha Epskamp (mail@sachaepskamp.com) } \seealso{ \code{\link{qgraph}} } qgraph/man/qgraph.Rd0000644000176200001440000015663214521125565014103 0ustar liggesusers\encoding{UTF-8} \name{qgraph} \alias{qgraph} %- Also NEED an '\alias' for EACH other topic documented here. \title{qgraph} \description{ This is the main function of qgraph which automatically creates an appropriate network and sends it to the plotting method.} \usage{ qgraph( input, ... ) } \arguments{ \item{input}{Can be either a weights matrix or an edgelist. Can also be an object of class "sem" (sem), "mod" (sem), "lavaan" (lavaan), "principal" (psych), "loadings" (stats), "factanal" (stats), "graphNEL" (Rgraphviz), "pcAlgo" (pcalg), \code{"huge"} (huge), \code{"select"} (huge) or the output of \code{glasso}} \item{...}{Any additional arguments described below. Also a list with class "qgraph" can be added that contains any of these arguments (this is returned invisibly by the function)} } \section{Important additional arguments}{ \describe{ \item{layout}{This argument controls the layout of the graph. 
"circle" places all nodes in a single circle, "groups" gives a circular layout in which each group is put in separate circles and "spring" gives a force embedded layout. It also can be a matrix with a row for each node and x and y coordinates in the first and second column respectively. Defaults to "circular" in weighted graphs without a groups list, "groups" in weighted graphs with a groups list, and "spring" in unweighted graphs. Can also be a function from the igraph package.} \item{groups}{An object that indicates which nodes belong together. Can be a list in which each element is a vector of integers identifying the numbers of the nodes that belong together, or a factor.} \item{minimum}{Edges with absolute weights under this value are not shown (but not omitted). Defaults to 0. Can also be set to \code{"sig"} to only show significant edges for \code{graph = "cor"} and \code{graph = "pcor"}). Significance level is controlled by \code{alpha} and \code{bonf} arguments} \item{maximum}{qgraph regards the highest of the maximum or highest absolute edge weight as the highest weight to scale the edge widths too. To compare several graphs, set this argument to a higher value than any edge weight in the graphs (typically 1 for correlations).} \item{cut}{In weighted graphs, this argument can be used to cut the scaling of edges in width and color saturation. Edges with absolute weights over this value will have the strongest color intensity and become wider the stronger they are, and edges with absolute weights under this value will have the smallest width and become vaguer the weaker the weight. If this is set to \code{0}, no cutoff is used and all edges vary in width and color. Defaults to 0 for graphs with less then 20 nodes. 
For larger graphs the \code{cut} value is automatically chosen to be equal to the maximum of the 75th quantile of absolute edge strengths or the edge strength corresponding to 2n-th edge strength (n being the number of nodes.)} \item{details}{Logical indicating if minimum, maximum and cutoff score should be printed under the graph. Defaults to FALSE.} \item{threshold}{A numeric value that defaults to 0. Edges with absolute weight that are not above this value are REMOVED from the network. This differs from \code{minimum} which simply changes the scaling of width and color so that edges with absolute weight under minimum are not plotted/invisible. Setting a threshold influences the spring layout and centrality measures obtained with the graph whereass setting a minimum does not. In the case of correlation (\code{graph = "cor"}) or partial correlation (\code{graph = "pcor"}) networks this argument can also be given a string to omit insignificant edges. See description of this argumet in the next section (Additional options for correlation/covariance matrices).} \item{palette}{The palette used for coloring nodes when the \code{groups} argument is used. Can be one of \code{"rainbow"} (default), \code{"colorblind"} (making use of http://jfly.iam.u-tokyo.ac.jp/color/), \code{"pastel"}, \code{"gray"}, \code{"R"} and \code{"ggplot2"}.} \item{theme}{This argument sets different defaults for various graphical arguments (most notably \code{posCol}, \code{negCol} and \code{palette}). Can be \code{"classic"}, \code{"colorblind"}, \code{"gray"}, \code{"Hollywood"}, \code{"Borkulo"}, \code{"gimme"}, \code{"TeamFortress"}, \code{"Reddit"}, \code{"Leuven"} or \code{"Fried"}.} } } \section{Additional options for correlation/covariance matrices}{ \describe{ \item{graph}{Type of graph to be made when a correlation or covariance matrix is used as input. 
Setting this to other values than \code{"default"} will check if the matrix is a correlation or covariance matrix; if the matrix is not positive definite \code{\link[Matrix]{nearPD}} from the Matrix package will be used. Options are: \describe{ \item{"cor"}{Plots a correlation network. Runs \code{\link[stats]{cov2cor}} if input is detected to be a covariance matrix and plots the input as is} \item{"pcor"}{Plots a partial correlation network, using \code{cor2pcor} from the parcor package (Kraemer, Schaefer and Boulesteix, 2009) on the input matrix} \item{"glasso"}{Will run \code{\link{EBICglasso}} to obtain an optimal sparse estimate of the partial correlation matrix using the glasso package (Friedman, Hastie and Tibshirani, 2011)} } Outdated and limited supported options are \code{"factorial"}, which will create a graph based on an exploratory factor analysis, and \code{"sig"} will transform all correlations in p-values (using the fdrtool package; Korbinian Strimmer, 2014) and force mode="sig". "sig2" will do the same but show p-values based on negative statistics in shades of orange} \item{threshold}{In addition to a numeric value to omit edges this argument can also be assigned a string to omit insignficant edges. Note that this REMOVES edges from the network (which influences centrality measures and the spring layout). Can be \code{"sig"} to compute significance without correction for multiple testing, \code{"holm"}, \code{"hochberg"}, \code{"hommel"}, \code{"bonferroni"}, \code{"BH"}, \code{"BY"}, \code{"fdr"} or \code{"none"} which are used directly in the \code{adjust} argument in \code{\link[psych]{corr.p}} of the psych package (Revelle, 2014). In addition, this argument can be assigned \code{"locfdr"} in which edges are set to zero if the local FDR is below \code{FDRcutoff}. 
\code{\link[fdrtool]{fdrtool}} from the fdrtool package (Klaus and Strimmer, 2014) is used to compute these measures, which is used inside \code{\link{FDRnetwork}}.} \item{sampleSize}{The sample-size. Used when \code{graph = "glasso"} or \code{minimum = "sig"}} \item{tuning}{A tuning parameter used in estimation. Currently only used when \code{graph = "glasso"} and corresponds to the \code{gamma} argument} \item{lambda.min.ratio}{The minimal lambda ratio used in \code{\link{EBICglasso}}, defaults to 0.01.} \item{gamma}{Alias for tuning (overwrites the tuning argument).} \item{refit}{ Logical, should the optimal graph be refitted without LASSO regularization? Defaults to \code{FALSE} and only used if \code{graph = "glasso"}. } \item{countDiagonal}{ Should diagonal be counted in EBIC computation? Defaults to \code{FALSE}. Set to \code{TRUE} to mimic qgraph < 1.3 behavior (not recommended!). } \item{alpha}{The significance level (defaults to 0.05) to be used for not showing edges if \code{minimum = "sig"}.} \item{bonf}{Logical indicating if a bonferonni correction should be applied if \code{minimum = "sig"}.} \item{FDRcutoff}{Cutoff used in which partial correlations should be included if \code{graph = "fdr"}. Defaults to 0.9} } } \section{Output arguments}{ \describe{ \item{mar}{A vector of the form c(bottom, left, top, right) which gives the margins. Works similar to the argument in par(). Defaults to c(3,3,3,3)} \item{filetype}{A character containing the file type to save the output in. "R" outputs in a new R window, "pdf" creates a pdf file. "svg" creates a svg file (requires RSVGTipsDevice). "tex" creates LaTeX code for the graph (requires tikzDevice). 'jpg', 'tiff' and 'png' can also be used. If this is given any other string (e.g. filetype="") no device is opened. Defaults to 'R' if the current device is the NULL-device or no new device if there already is an open device. 
A function such as \code{x11} can also be used} \item{filename}{Name of the file without extension} \item{width}{Width of the plot, in inches} \item{height}{Height of the plot, in inches} \item{normalize}{Logical, should the plot be normalized to the plot size. If TRUE (default) border width, vertex size, edge width and arrow sizes are adjusted to look the same for all sizes of the plot, corresponding to what they would look in a 7 by 7 inches plot if normalize is FALSE.} \item{DoNotPlot}{Runs qgraph but does not plot. Useful for saving the output (i.e. layout) without plotting} \item{plot}{Logical. Should a new plot be made? Defaults to TRUE. Set to FALSE to add the graph to the existing plot.} \item{rescale}{Logical. Defines if the layout should be rescaled to fit the -1 to 1 x and y area. Defaults to TRUE. Can best be used in combination with plot=FALSE.} \item{standAlone}{Logical. If filetype="tex" this argument can be used to choose between making the output a standalone LaTeX file or only the codes to include the graph.} }} \section{Graphical arguments}{ \subsection{Nodes}{ These arguments influence the plotting of nodes in qgraph. Most of them can be assigned a single value or a vector with a value for each node. \describe{ \item{color}{A vector with a color for each element in the groups list, or a color for each node. Defaults to the background color ("bg" argument, which defaults to "white") without groups list and rainbow(length(groups)) with a groups list.} \item{vsize}{A value indicating the size of the nodes (horizontal if shape is "rectangle". Can also be a vector of length 2 (nodes are scaled to degree) or a size for each node. Defaults to 8*exp(-nNodes/80)+1} \item{vsize2}{A value indicating the vertical size of the nodes where the shape is "rectangle". Can also be a vector of length 2 (nodes are scaled to degree) or a size for each node. Defaults to the value of 'vsize'. 
If 'vsize' is not assigned this value is used as a scalar to 'vsize' (e.g., \code{vsize2 = 1/2} would result in rectangular nodes where the height is half the default width)} \item{node.width}{Scalar on the default value of 'vsize'. Defaults to 1.} \item{node.height}{Scalar on the default value of 'vsize2'. Defaults to 1.} \item{borders}{Logical indicating if borders should be plotted, defaults to TRUE.} \item{border.color}{Color vector indicating colors of the borders. Is repeated if length is equal to 1. Defaults to "black"} \item{border.width}{Controls the width of the border. Defaults to 2 and is comparable to 'lwd' argument in 'points'.} \item{shape}{A character containing the shape of the nodes. \code{"circle"}, \code{"square"}, \code{"triangle"} and \code{"diamond"} are supported. In addition, can be a name of an element of \code{polygonList} to plot the corresponding polygon (not recommended for large graphs), which by default includes shapes \code{"ellipse"} and \code{"heart"}. Can also be a vector with a shape for each node. Defaults to "circle".} \item{polygonList}{ A list containing named lists for each element to include polygons to lookup in the \code{shape} argument. Each element must be named as they are used in \code{shape} and contain a list with elements \code{x} and \code{y} containing the coordinates of the polygon. By default \code{ellipse} and \code{heart} are added to this list. These polygons are scaled according to \code{vsize} and \code{vsize2}} \item{vTrans}{Transparency of the nodes, must be an integer between 0 and 255, 255 indicating no transparency. Defaults to 255} \item{subplots}{A list with as elements R expressions or NULL for each node. If it is an R expression it is evaluated to create a plot for the node.} \item{subpars}{List of graphical parameters to be used in the subplots} \item{subplotbg}{Background to be used in the subplots. 
If missing inherits from 'background' argument.} \item{images}{A character vector of length 1 or the same length as the number of nodes indicating the file location of PNG or JPEG images to use as nodes. Can be NA to not plot an image as node and overwrites 'subplots' } \item{noPar}{Set to \code{TRUE} to not have qgraph run the \code{par} function. Useful when sending qgraph plots as sublots using \code{subplots}.} \item{pastel}{Logical, should default colors (for groups or edge equality constraints) be chosen from pastel colors? If TRUE then \code{\link[colorspace]{rainbow_hcl}} is used.} \item{rainbowStart}{A number between 0 and 1 indicating the offset used in rainbow functions for default node coloring.} \item{usePCH}{Logical indicating if nodes should be drawn using polygons or base R plotting symbols. Defaults to \code{TRUE} if more than 50 nodes are used in the graph or if the graph is stored in a file. See details.} \item{node.resolution}{Resolution of the nodes if \code{usePCH=FALSE}. Defaults to 100} \item{title}{String with a title to be drawn in the topleft of the plot.} \item{title.cex}{Size of the title, defaults to 1.} \item{preExpression}{A parsable string containing R codes to be evaluated after opening a plot and before drawing the graph.} \item{postExpression}{A parsable string containing R codes to be evaluated just before closing the device.} \item{diag}{Should the diagonal also be plotted as edges? defaults to FALSE. Can also be "col" to plot diagonal values as vertex colors.} } } \subsection{Node labels}{ These arguments influence the plotting of node labels in qgraph. Most of them can be assigned a single value or a vector with a value for each node. \describe{ \item{labels}{If FALSE, no labels are plotted. If TRUE, order in weights matrix is used as labels. This can also be a vector with a label for each node. 
Defaults for graphs with less than 20 nodes to a 3 character abbreviation of the columnames and rownames if these are identical or else to TRUE. If a label contains an asterisk (e.g. "x1*") then the asterisk will be omitted and the label will be printed in symbol font (use this for Greek letters). Can also be a list with a label as each element, which can be expressions for more advanced mathematical annotation.} \item{label.cex}{Scalar on the label size.} \item{label.color}{Character containing the color of the labels, defaults to "black"} \item{label.prop}{Controls the proportion of the width of the node that the label rescales to. Defaults to 0. 9.} \item{label.norm}{A single string that is used to normalize label size. If the width of the label is lower than the width of the hypothetical label given by this argument the width of label given by this argument is used instead. Defaults to "OOO" so that every label up to three characters has the same font size.} \item{label.scale}{Logical indicating if labels should be scaled to fit the node. Defaults to TRUE.} \item{label.scale.equal}{Logical, set to \code{TRUE} to make make the font size of all labels equal} \item{label.font}{Integer specifying the label font of nodes. Can be a vector with value for each node} \item{label.fill.vertical}{ Scalar (0 - 1) indicating the maximum proportion a label may fill a node vertically. } \item{label.fill.horizontal}{ Scalar (0 - 1) indicating the maximum proportion a label may fill a node horizontally. } \item{node.label.offset}{ A vector of length two with the x and y offset coordinates of the node label (e.g., \code{c(0.5, 0.5)} is the default and centers the label with respect to the node area). The vector is passed to the \code{adj} argument of \code{graphics::text} function. } \item{node.label.position}{ Either a numeric vector of length 1 (i.e., it gets recycled) or of length equal to the number of nodes in the network, used to set the positions of the node labels. 
Takes values between 1 and 4 as follows: 1 - bottom; 2 - left; 3 - top; 4 - right. Overrides the \code{node.label.offset} argument and values are passed to the \code{pos} argument of \code{graphics::text} function. Defaults to \code{NULL}. } } } \subsection{Edges}{ These arguments influence the plotting of edges qgraph. Most of them can be assigned a single value, a vector with a value per edge when an edgelist is used as input or a matrix containing values for each edge when a wheights matrix is used as input. \describe{ \item{esize}{Size of the largest edge (or what it would be if there was an edge with weight maximum). Defaults to 15*exp(-nNodes/90)+1) for weighted graphs and 2 for unweighted graphs. In directed graphs these values are halved.} \item{edge.width}{Scalar on 'esize' and 'asize' arguments to make edges wider with a single argument. 'esize' is multiplied with this value and 'asize' with the square root of this value.} \item{edge.color}{Color of edges. Can be either a single value to make all edges the same color, a matrix with a color for each edge (when using a weights matrix) or a vector with a color for each edge (when using an edgelist). NA indicates that the default color should be used. Note that unless \code{fade=FALSE} colors still fade to white corresponding to their strength} \item{posCol}{Color of positive edges. Can be a vector of two to indicate color of edges under 'cut' value and color of edges over 'cut' value. If 'fade is set to TRUE the first color will be faded the weaker the edge weight is. If this is only one element this color will also be used for edges stronger than the 'cut' value. Defaults to c("#009900","darkgreen")} \item{negCol}{Color of negative edges. Can be a vector of two to indicate color of edges under 'cut' value and color of edges over 'cut' value. If 'fade is set to TRUE the first color will be faded the weaker the edge weight is. 
If this is only one element this color will also be used for edges stronger than the 'cut' value. Defaults to c("#BF0000","red")} \item{unCol}{Color to indicate the default edge color of unweighted graphs. Defaults to "#808080".} \item{probCol}{Color of the probability edges. Defaults to \code{"blue"}. Only used when \code{probabilityEdges = TRUE}} \item{negDashed}{Logical, set to \code{TRUE} to make negative edges dashed (overwrites \code{lty}).} \item{probabilityEdges}{Logical, do edges indicate probabilities? If this is set to \code{TRUE} \code{posCol} is overwritten by \code{probCol}. Mainly implemented for automatic generation of graphs} \item{colFactor}{Exponent of transformation in color intensity of relative strength. Defaults to 1 for linear behavior.} \item{trans}{In weighted graphs: logical indicating if the edges should fade to white (FALSE) or become more transparent (TRUE; use this only if you use a background). In directed graphs this is a value between 0 and 1 indicating the level of transparency. (also used as 'transparency')} \item{fade}{if TRUE (default) and if 'edge.color' is assigned, transparency will be added to edges that are not transparent (or for which no transparency has been assigned) relative to the edge strength, similar if 'trans' is set to TRUE.} \item{loopRotation}{A vector with an element for each node with either \code{NA} to let qgraph choose the rotation of the loop, or the rotation of the loop per node in radian} \item{loop}{If diag=TRUE, this can be used to scale the size of the loop. defaults to 1.} \item{lty}{Line type, see 'par'} \item{edgeConnectPoints}{This argument specifies the point for each edge to which it connects to a node, in radians. Can be either a matrix with a row for each edge and two columns: The first column indicates the connection point of the source of the edge and the second column specifies the connection point of the destination of the edge. 
Can also be an array with a row and column for each node two slices which indicate the source and destination of the edge connecting the two nodes.} } } \subsection{Edge Curvature}{ These arguments control the curvature of edges. Most of them can be assigned a single value, a vector with a value per edge when an edgelist is used as input or a matrix containing values for each edge when a wheights matrix is used as input. \describe{ \item{curve}{A value indicating how strongly edges should be curved. Either a single value, a vector (edgelist input) with a value for each edge or a matrix (weights matrix input). NA indicates default curve behavior should be used, which only curves edges if there are multiple edges between two nodes.} \item{curveAll}{Logical, indicating if all edges should be curved with the value of the 'curve' or only edges between nodes that have share multiple edges.} \item{curveDefault}{The default curvature. Defaults to 1.} \item{curveShape}{The shape of the curve, as used in \code{xspline}. Defaults to \code{-1}.} \item{curveScale}{Logical, should curve scale with distance between nodes. Defaults to \code{TRUE}. If \code{FALSE}, the curve can be exactly determined. Recommended to set to \code{TRUE} for graphs and \code{FALSE} for diagrams. The curvature is corrected for the number of nodes and will be smaller if there are more nodes.} \item{curveScaleNodeCorrection}{Logical, set to \code{FALSE} to disable the node correction in \code{curveScale}. Defaults to \code{TRUE}. Not recommended. Set to \code{FALSE} ONLY if you know what you are doing.} \item{curvePivot}{Quantile to pivot curves on. This can be used to, rather than round edges, make straight edges as curves with "knicks" in them. Can be logical or numeric. 
\code{FALSE} (default) indicates no pivoting in the curved edges, a number indicates the quantile (and one minus this value as quantile) on which to pivot curved edges and \code{TRUE} indicates a value of 0.1.} \item{curvePivotShape}{The shape of the curve around the pivots, as used in \code{xspline}. Defaults to \code{0.25}.} \item{parallelEdge}{Logical, set to \code{TRUE} to draw parallel straight edges rather than curved edges when there are multiple edges between two nodes. Can be a vector with value per edge for edgelists or a matrix with a value per edge for weights marices.} \item{parallelAngle}{The distance in radians an edge is shifted if \code{parallel=TRUE}. Can be set to \code{NA} (default) to determine based on number of edges between two nodes. Can be a vector with value per edge for edgelists or a matrix with a value per edge for weights marices.} \item{parallelAngleDefault}{The default value for parallelAngle, indicating the angle of the edge furthest from the center. Defaults to \code{pi/6}} } } \subsection{Edge Labels}{ These arguments influence the plotting of edge labels qgraph. Most of them can be assigned a single value, a vector with a value per edge when an edgelist is used as input or a matrix containing values for each edge when a weights matrix is used as input. \describe{ \item{edge.labels}{If FALSE, no edge labels are plotted. If TRUE, numerical edge weights are printed on the edges. This can also be a vector with a label for each edge. Defaults to FALSE. If a label contains an asterisk (e.g. "y1*") then the asterisk will be omitted and the label will be printed in symbol font (use this for Greek letters). Can also be a list with a label as each element, which can be expressions for more advanced mathematical annotation.} \item{edge.label.cex}{Either a single number or a number per edge used as a scalar of the edge label size. Defaults to 1.} \item{edge.label.bg}{Either a logical or character vector/matrix. 
Indicates the background behind edge labels. If TRUE (default) a white background is plotted behind each edge label. If FALSE no background is plotted behind edge labels. Can also be a single color character, a vector or matrix of color vectors for each edge.} \item{edge.label.margin}{Margin of the background box around the edge label. Defaults to zero.} \item{edge.label.position}{Vector of numbers between 0 and 1 controlling the relative position of each edge label. Defaults to 0.5 for placing edge labels at the middle of the edge.} \item{edge.label.font}{Integer specifying the label font of edges. Can be a vector or matrix with value for each node} \item{edge.label.color}{Character vector indicating the color of the edge labels. It can be either a vector of length equal to the number of edges in the network or a single character color that will be applied to all edges.} } } \subsection{Layout}{ Arguments concerning the placement of nodes, in combination with 'layout'. \describe{ \item{repulsion}{Scalar on the default repulse radius in the spring layout. Defaults to 1. Setting this argument to lower values (e.g., 0.5) will cause nodes in the spring layout to repulse each other less. This is especially useful if a few unconnected nodes cause the giant component to visually be clustered too much in the same place.} \item{layout.par}{A list of arguments passed to \code{\link{qgraph.layout.fruchtermanreingold}} when \code{layout = "spring"} or to an igraph function when such a function is assigned to 'layout'. Defaults to \code{ list(repulse.rad = nNodes^(repulsion * 3))} if \code{layout = "spring"} and \code{list()} otherwise.} \item{layoutRound}{Logical, should weights be rounded (default 10 digits) before computing layouts? This will hopefully make sure different machines result in the same layout. 
Defaults to \code{TRUE}} \item{layout.control}{A scalar on the size of the circles created with the circular layout.} \item{aspect}{Should the original aspect ratio be maintained if rescale = TRUE? Defaults to FALSE. Set this to TRUE to keep the aspect ratio of the original layout (e.g. result from layout="spring").} \item{rotation}{A vector that can be used to rotate the circles created with the circular layout. Must contain the rotation in radian for each group of nodes. Defaults to zero for each group.} } } \subsection{Legend}{ Arguments to control the legend placed on the right side of the graph. \describe{ \item{legend}{Logical value indicating if a legend should be plotted. Defaults to TRUE if a groups object or nodeNames is supplied} \item{legend.cex}{Scalar of the legend. defaults to 1} \item{legend.mode}{Character string indicating the type of legend to be drawn. \code{"groups"} indicates the legend should be based on the \code{groups} object, \code{"names"} indicates the legend should be based on the \code{nodeNames} object, and \code{style1} and \code{style2} indicate the legend should be based on both. Defaults to \code{"style1"} if both \code{"groups"} and \code{"nodeNames"} arguments are used.} \item{GLratio}{Relative size of the graph compared to the layout. Defaults to 2.5} \item{layoutScale}{A vector with a scalar for respectively the x and y coordinates of the layout (which default plotting area is from -1 to 1 on both x and y axis). Setting this to e.g. c(2,2) would make the plot twice as big. Use this in combination with 'layoutOffset' and 'plot' arguments to define the graph placement on an existing plot.} \item{layoutOffset}{A vector with the offset to the x and coordinates of the center of the graph (defaults to (0,0)). 
Use this in combination with 'layoutScale' and 'plot' arguments to define the graph placement on an existing plot.} \item{nodeNames}{Names for each node, can be used to plot a legend next to the plot that links the node labels to node names.} } } \subsection{Background}{ These arguments control the background of the plot \describe{ \item{bg}{If this is TRUE, a background is plotted in which node colors cast a light of that color on a black background. Can also be a character containing the color of the background Defaults to FALSE} \item{bgcontrol}{The higher this is, the less light each node gives if bg=TRUE. Defaults to 6.} \item{bgres}{square root of the number of pixels used in bg=TRUE, defaults to 100.} } } \subsection{General graphical arguments}{ \describe{ \item{pty}{See 'par'} \item{gray}{Logical, set to TRUE to plot the graph in grayscale colors} % \item{tooltips}{A vector with tooltips for each node, only used when filetype='svg' or filetype='tex'} %\item{overlay}{Logical, should a Venn-diagram like overlay be plotted? If TRUE then for each group a x\% confidence region is plotted for the X and Y position, using \code{\link[ellipse]{ellipse}}} %\item{overlaySize}{Specifies the size of the overlay ellipses. Corresponds to the confidence level (default is 0.5)} \item{font}{Integer specifying the default font for node and edge labels} } } } \section{Arguments for directed graphs}{ \describe{ \item{directed}{Logical indicating if edges are directed or not. Can be TRUE or FALSE to indicate if all edges are directed, a logical vector (when using edgelists) or a logical matrix (when using weights matrix)} \item{arrows}{A logical indicating if arrows should be drawn, or a number indicating how much arrows should be drawn on each edge. If this is TRUE, a simple arrow is plotted, if this is a number, arrows are put in the middle of the edges.} \item{arrowAngle}{Angle of the arrowhead, in radians. 
Defaults to pi/8 for unweighted graphs and pi/4 for weighted graphs.} \item{asize}{Size of the arrowhead. Defaults to 2*exp(-nNodes/20)+2.} \item{open}{Logical indicating if open (TRUE) or closed (FALSE) arrowheads should be drawn.} \item{bidirectional}{If this is TRUE, then directional edges between nodes that have two edges between them are not curved. Defaults to FALSE. Can also be a logical vector (when using edgelists) or a logical matrix (when using weights matrix)} }} \section{Arguments for graphs based on significance values}{ \describe{ \item{mode}{This argument defines the mode used for coloring the edges. The default, "strength" assumes each edge weight indicates the strength of connection centered around zero and makes positive edges green and negative edges red. If this is set to "sig" then the edge weights are assumed to be significance values and colored accordingly. This can also include negative values, which will be interpreted as p-values based on negative statistics.} \item{alpha}{The significance level (defaults to 0.05) to be used for not showing edges if \code{minimum = "sig"}, or if \code{Graph = "sig"} a vector of max 4 elements indicating the alpha level cutoffs. Defaults to c(0.0001,0.001,0.01,0.05)} \item{sigScale}{The function used to scale the edges if mode="sig". Defaults to $function(x)0.8*(1-x)^(log(0.4/0.8,1-0.05))$} \item{bonf}{Logical indicating if a Bonferroni correction should be applied if \code{minimum = "sig"} or \code{mode="sig"}} }} \section{Arguments for plotting scores on nodes}{ \describe{ \item{scores}{This argument can be used to plot scores of an individual on the test. Should be a vector with the scores for each item. 
Currently this can only be integer values (e.g.\ LIKERT scales).} \item{scores.range}{Vector of length two indicating the range of the scores, if scores is assigned.} }} \section{Arguments for manually defining graphs}{ \describe{ \item{mode}{The mode argument (see section on significance graph arguments) can also be used to make the weights matrix correspond directly to the width of the edges (as in lwd of plot()). To do this, set mode to "direct".} \item{edge.color}{This argument can be used to overwrite the colors. Can be either a single value to make all edges the same color, a matrix with a color for each edge (when using a weights matrix) or a vector with a color for each edge (when using an edgelist). NA indicates that the default color should be used. Note that unless \code{fade=FALSE} colors still fade to white corresponding to their strength} }} \section{Arguments for knots (tying together edges)}{ \describe{ \item{knots}{This argument can be used to tie edges together in their center, which can be useful to, for example, indicate interaction effects. This argument can be assigned a list where each element is a vector containing the edge numbers that should be knotted together. Another option is to assign the argument a integer vector (for edgelists) or a matrix (for weight matrices) with 0 indicating edges that should not be tied together, and increasing numbers indicating each knot.} \item{knot.size}{The size of the knots. Can be of length one or a vector with the size of each knot. Similar to 'vsize'. Defaults to 1.} \item{knot.color}{The color of the knots. Can be of length one or a vector with the size of each knot. Defaults to NA, which will result in a mix of the knotted edge colors.} \item{knot.borders}{Logical indicating if a border should be plotted around the knot. Can be of length one or a vector with the size of each knot. Works similar to 'borders'. Defaults to FALSE} \item{knot.border.color}{Color of the knot borders. 
Can be of length one or a vector with the size of each knot. Works similar to 'border.color'. Defaults to "black"} \item{knot.border.width}{Width of the knot borders. Can be of length one or a vector with the size of each knot. Works similar to 'border.width'. Defaults to 1} }} \section{Arguments for bars}{ \describe{ \item{means}{ A vector with means for every node or \code{NA}. Will plot a vertical bar at the location of the mean between \code{meanRange} values. \code{NA} omits a bar. } \item{SDs}{ A vector with SDs for every node or \code{NA}. Will plot an error bar of 2 times this value around the \code{means} location. \code{NA} to omit. } \item{meanRange}{ The range of the \code{means} argument. Default to \code{range(means,na.rm=TRUE)} } \item{bars}{A list with for each node containing either NULL or a vector with values between 0 and 1 indicating where bars should be placed inside the node.} \item{barSide}{Integer for each node indicating at which side the bars should be drawn. 1, 2, 3 or 4 indicating at bottom, left, top or right respectively.} \item{barColor}{A vector with for each node indicating the color of bars. Defaults to the border color of the node.} \item{barLength}{A Vector indicating the relative length of bars of each node compared to the node size. Defaults to 0.5.} \item{barsAtSide}{Logical, should bars be drawn at the side of a node or at its center? Defaults to FALSE.} }} \section{Arguments for pies}{ \describe{ \item{pie}{ A vector with values between 0 and 1 for each node (or one value for all nodes). Supplying this argument will make the border of nodes a pie chart. Can also be a list with vectors to make pie charts of multiple parts.} \item{pieBorder}{ The size of the pie chart in the border, between 0 and 1. Defaults to \code{0.15}. Set to 1 to make the whole node a pie chart. Can be a vector with a value for each node. } \item{pieColor}{ Colors of the pie plot parts. 
Can be a vector with a value for each node, or a list with multiple values if there are more parts. } \item{pieColor2}{ Final color of the pie chart. Only added if the values in the 'pie' argument do not add up to 1. Defaults to \code{'white'}. Can be a vector with a value for each node. } \item{pieStart}{ A vector with values between 0 and 1 for each node (or one value for all nodes), indicating the starting point of the pie chart. } \item{pieDarken}{ A vector with values between 0 and 1 for each node (or one value for all nodes), indicating how much darker the pie border color is made than the node color in the default coloring scheme. } \item{piePastel}{ Should pastel colors be used to fill pie chart parts when more than 2 blocks are used? } \item{pieCImid}{ A vector with values between 0 and 1 for each node (or one value for all nodes), indicating the center point of the confidence region. Overwrites the \code{pie} argument } \item{pieCIlower}{ A vector with values between 0 and 1 for each node (or one value for all nodes), indicating the lower bound of the confidence region. Overwrites the \code{pie} argument } \item{pieCIupper}{ A vector with values between 0 and 1 for each node (or one value for all nodes), indicating the upper bound of the confidence region. Overwrites the \code{pie} argument } \item{pieCIpointcex}{ A vector with values between 0 and 1 for each node (or one value for all nodes), indicating the size of the point estimate of the confidence region. Overwrites the \code{pie} argument. Defaults to 0.01. } \item{pieCIpointcex}{ A vector with values between 0 and 1 for each node (or one value for all nodes), indicating the color of the point estimate of the confidence region. Overwrites the \code{pie} argument. Defaults to \code{"black"}. 
} } } %\section{Arguments that support the BDgraph package}{ %\describe{ %\item{BDgraph}{A character vector containing \code{"phat"} for plotting posterior probabilities of each edge, \code{"Khat"} for plotting mean posterior partial correlations or both. The order matters as the layout of the second plot is based on the layout of the first plot. Defaults to \code{c("phat","Khat")}} %\item{BDtitles}{Logical, should titles be drawn above the plots?} %}} \section{Additional arguments}{ \describe{ \item{edgelist}{Logical, if TRUE 'input' is assumed to be an edgelist, else if FALSE input is assumed to be a weights matrix. By default this is chosen automatically based on the dimensions of 'input' and this argument is only needed if the dimensions are ambiguous (square matrix with 2 or 3 rows/columns)} \item{weighted}{Logical that can be used to force either a weighted graph (TRUE) or an unweighted graph(FALSE).} \item{nNodes}{The number of nodes, only needs to be specified if the first argument is an edge-list and some nodes have no edges} \item{XKCD}{If set to TRUE the graph is plotted in XKCD style based on http://stackoverflow.com/a/12680841/567015.} }} \details{ Because of the amount of arguments the usage of the qgraph function has been reduced by using the ... method for clarity. This does mean that arguments need to be specified by using their exact name. For instance, to specify color="red" you can not use col="red". Important to note is that qgraph needs to compute in many graphs where the border of nodes are in the plotting area. If the graph is manually rescaled (such as through the "zoom" option in RStudio) the plotting area is changed. This means that the computed location of the border of nodes is no longer valid if the nodes are to remain perfectly square or circular. To overcome this, the \code{usePCH} argument can be used. 
If this argument is set to \code{FALSE} nodes will be plotted as polygons meaning they will rescale with rescaling the graph (circles can become ovals) and not have perfect resolution in PDF files. If \code{usePCH} is set to \code{TRUE} a default plotting symbol is used meaning the graph can not be rescaled but the node will look good in PDF. By default, \code{qgraph} sets \code{usePCH} to \code{TRUE} if it detects the graph is stored in a file. While the \code{usePCH} argument makes graphs rescalable it is not a perfect solution. It is highly recommended to NOT RESCALE PLOTTING AREAS when using qgraph, or to rerun qgraph after the plotting area is rescaled. This means using the save graph option from RStudio should be avoided in favor of the \code{filetype} argument in \code{qgraph} } \section{Using qgraph to plot graphs}{ The first argument of qgraph(), 'input', is the input. This can be a number of objects but is mainly either a weights matrix or an edgelist. Here we will assume a graph is made of n nodes connected by m edges. qgraph is mainly aimed at visualizing (statistical) relationships between variables as weighted edges. In these edge weights a zero indicates no connection and negative values are comparable in strength to positive values. Many (standardized) statistics follow these rules, the most important example being correlations. In the special case where all edge weights are either 0 or 1 the weights matrix is interpreted as an adjacency matrix and an unweighted graph is made. A weights matrix is a square n by n matrix in which each row and column represents a node. The element at row i and column j indicates the connection from node i to node j. If the weights matrix is symmetrical an undirected graph is made and if the matrix is asymmetrical a directed graph is made. Alternatively an edgelist can be used. This is an m by 2 matrix (not a list!) in which each row indicates an edge. 
The first column indicates the number of the start of the edge and the second column indicates the number of the end of the edge. The number of each node is a unique integer between 1 and n. The total number of nodes will be estimated by taking the highest value of the edgelist. If this is incorrect (there are nodes with no edges beyond the ones already specified) the 'nNodes' argument can be used. If an integer between 1 and n is missing in the edgelist it is assumed to be a node with no edges. To create a weighted graph edge weights can be added as a third column in the edgelist. By default using an edgelist creates a directed graph, but this can be set with the 'directed' argument. } \section{Interpreting graphs}{ In weighted graphs green edges indicate positive weights and red edges indicate negative weights. The color saturation and the width of the edges corresponds to the absolute weight and scale relative to the strongest weight in the graph. It is possible to set this strongest edge by using the 'maximum' argument. When 'maximum' is set to a value above any absolute weight in the graph that value is considered the strongest edge (this must be done to compare different graphs; a good value for correlations is 1). Edges with an absolute value under the 'minimum' argument are omitted (useful to keep filesizes from inflating in very large graphs). In larger graphs the above edge settings can become hard to interpret. With the 'cut' argument a cutoff value can be set which splits scaling of color and width. This makes the graphs much easier to interpret as you can see important edges and general trends in the same picture. Edges with absolute weights under the cutoff score will have the smallest width and become more colorful as they approach the cutoff score, and edges with absolute weights over the cutoff score will be full red or green and become wider the stronger they are. } \section{Specifying the layout}{ The placement of the nodes (i.e. 
the layout) is specified with the 'layout' argument. It can be manually specified by entering a matrix for this argument. The matrix must have a row for each node and two columns indicating its X and Y coordinate respectively. qgraph plots the nodes on a (-1:1)(-1:1) plane, and the given coordinates will be rescaled to fit this plane unless 'rescale' is FALSE (not recommended). Another option to manually specify the layout is by entering a matrix with more then two columns. This matrix must then consist of zeroes and a number (the order in the weights matrix) for each node indicating it's place. For example: 0 0 2 0 0 1 0 3 0 4 will place node 2 at the top in the center, node 1 at the bottom left corner, node 3 at the bottom in the center and node 4 at the bottom right corner. It is recommended however that one of the integrated layouts is used. 'layout' can be given a character as argument to accomplish that. layout="circular" will simply place all nodes in a circle if the groups argument is not used and in separate circles per group if the groups argument is used (see next section). The circular layout is convenient to see how well the data conforms to a model, but to show how the data clusters another layout is more appropriate. By specifying layout="spring" the Fruchterman-reingold algorithm (Fruchterman & Reingold, 1991), which has been ported from the SNA package (Butts, 2010), can be used to create a force-directed layout. In principle, what this function does is that each node (connected and unconnected) repulse each other, and connected nodes also attract each other. Then after a number of iterations (500 by default) in which the maximum displacement of each node becomes smaller a layout is achieved in which the distance between nodes correspond very well to the absolute edge weight between those nodes. 
A solution to use this function for weighted graphs has been taken from the igraph package (Csardi G & Nepusz T, 2006) in which the same function was ported from the SNA package. New in qgraph are the option to include constraints on the nodes by fixing a coordinate for nodes or reducing the maximum allowed displacement per node. This can be done with the 'layout.par' argument. For more information see \code{\link{qgraph.layout.fruchtermanreingold}}. By default, 'layout' is set to "spring" for unweighted and directed graphs and "circular" otherwise. } \section{Grouping nodes}{ Grouping nodes (e.g., according to a measurement model) can be specified with the 'groups' argument. This can be a factor or a list in which each element is a vector containing the numbers of nodes that belong together (numbers are taken from the order in the weights matrix). All numbers must be included. If a groups list is specified the "groups" layout can be used to place these nodes together, the nodes in each group will be given a color, and a legend can be plotted (by setting 'legend' to TRUE). The colors will be taken from the 'color' argument, or be generated with the \code{\link{rainbow}} function.} \section{Output}{ By default qgraph will plot the graph in a new R window. However the graphs are optimized to be plotted in a PDF file. To easily create a pdf file set the 'filetype' argument to "pdf". 'filename' can be used to specify the filename and folder to output in. 'height' and 'width' can be used to specify the height and width of the image in inches. By default a new R window is opened if the current device is the NULL-device, otherwise the current device is used (note that when doing this 'width' and 'height' still optimize the image for those widths and heights, even though the output screen size isn't affected, this is especially important for directed graphs!). Furthermore filetype can also be set to numerous other values. 
Alternatively any output device in R can be used by simply opening the device before calling qgraph and closing it with dev.off() after calling qgraph. IMPORTANT NOTE: graphs made in qgraph must be exported programmatically using device functions such as pdf() and png(). Manually resizing a graph and using export functions such as the one built into RStudio will give UNSTABLE RESULTS.} \section{Manual specification of color and width}{ In qgraph the widths and colors of each edge can also be manually controlled. To directly specify the width of each edge set the 'mode' argument to "direct". This will then use the absolute edge weights as the width of each edge (negative values can still be used to make red edges). To manually set the color of each edge, set the 'edge.color' argument to a matrix with colors for each edge (when using a weights matrix) or a vector with a color for each edge (when using an edgelist). } \section{Replotting graphs and reusing layouts}{ If the result of \code{\link{qgraph}} is stored, such as \code{Graph <- qgraph(...)}, the plot can be recreated in two ways. \code{qgraph(Graph, ...)} reruns \code{qgraph} with the same arguments used in the original call except those restated in the dots. For example \code{qgraph(Graph, shape = "square")} will recreate the same plot but now use square nodes instead of circular. \code{plot(Graph)} will NOT rerun \code{qgraph} but simply plot the qgraph object. This means that now specific graph attributes can be changed before plotting. More specifically, \code{qgraph(Graph)} will base the new plot only on the \code{Arguments} element of the \code{qgraph} object and \code{plot(qgraph)} will base the new plot on the \code{graphAttributes} and \code{plotOptions} elements of the qgraph object. To reuse a layout, use the \code{layout} element. 
e.g., to plot a new graph with the same layout use \code{qgraph(..., layout = Graph$layout)} } \section{Additional information}{ By default, edges will be straight between two nodes unless there are two edges between two nodes. To overwrite this behavior the 'bidirectional' argument can be set to TRUE, which will turn two edges between two nodes into one bidirectional edge. 'bidirectional' can also be a vector with TRUE or FALSE for each edge. To specify the strength of the curve the argument 'curve' can be used (but only in directional graphs). 'curve' must be given a numerical value that represent an offset from the middle of the straight edge through where the curved edge must be drawn. 0 indicates no curve, and any other value indicates a curve of that strength. A value of 0.3 is recommended for nice curves. This can be either one number or a vector with the curve of each edge. Nodes and edges can be given labels with the 'labels' and the 'edge.labels' arguments. 'labels' can be set to FALSE to omit labels, TRUE (default) to set labels equal to the node number (order in the weights matrix) or it can be a vector with the label for each node. Edge labels can also be set to FALSE to be omitted (default). If 'edge.labels' is TRUE then the weight of each label is printed. Finally, 'edge.labels' can also be a vector with the label for each edge. If a label (both for edges and nodes) contain an asterisk then the asterisk is omitted and that label is printed in the symbol font (useful to print Greek letters). 
A final two things to try: the 'scores' argument can be given a vector with the scores of a person on each variable, which will then be shown using colors of the nodes, and the 'bg' argument can be used to change the background of the graph to another color, or use bg=TRUE for a special background (do set transparency=TRUE when using background colors other than white).} \section{Debugging}{ If this function crashes for any reason with the filetype argument specified, run: dev.off() To shut down the output device!} \value{ qgraph returns (invisibly) a 'qgraph' object containing: \item{Edgelist}{A list containing for each edge the node of origin, node of destination, weight and whether the edge is directed and bidirectional.} \item{Arguments}{A list containing the arguments used in the \code{qgraph} call.} \item{plotOptions}{A list containing numerous options used in the plotting method.} \item{graphAttributes}{A list containing numerous attributes for nodes, edges and the entire graph} \item{layout}{A matrix containing the layout used in the plot} \item{layout.orig}{A matrix containing the original (unscaled) layout.} } \references{ Carter T. Butts (2010). sna: Tools for Social Network Analysis. R package version 2.2-0. http://CRAN.R-project.org/package=sna Csardi G, Nepusz T (2006). The igraph software package for complex network research, InterJournal, Complex Systems 1695. http://igraph.sf.net Sacha Epskamp, Angelique O. J. Cramer, Lourens J. Waldorp, Verena D. Schmittmann, Denny Borsboom (2012). qgraph: Network Visualizations of Relationships in Psychometric Data. Journal of Statistical Software, 48(4), 1-18. URL http://www.jstatsoft.org/v48/i04/. Jerome Friedman, Trevor Hastie and Rob Tibshirani (2011). glasso: Graphical lasso-estimation of Gaussian graphical models. R package version 1.7. http://CRAN.R-project.org/package=glasso Bernd Klaus and Korbinian Strimmer. (2014). fdrtool: Estimation of (Local) False Discovery Rates and Higher Criticism. 
R package version 1.2.12. http://CRAN.R-project.org/package=fdrtool Fruchterman, T. & Reingold, E. (1991). Graph drawing by force-directed placement. Software - Pract. Exp. 21, 1129-1164. N. Kraemer, J. Schaefer. A.-L. Boulesteix (2009). Regularized Estimation of Large-Scale Gene Regulatory Networks using Gaussian Graphical Models BMC Bioinformatics 10:384 Plate, T. and based on RSvgDevice by T Jake Luciani (2009). RSVGTipsDevice: An R SVG graphics device with dynamic tips and hyperlinks. R package version 1.0-1. Revelle, W. (2014) psych: Procedures for Personality and Psychological Research, Northwestern University, Evanston, Illinois, USA, http://CRAN.R-project.org/package=psych Version = 1.4.4. } \author{ Sacha Epskamp } \seealso{ \code{\link{cor_auto}} \code{\link{qgraph.animate}} \code{\link{qgraph.loadings}} } \examples{ \dontrun{ ### Correlations ### # Load big5 dataset: data(big5) data(big5groups) # Compute correlation matrix: big5_cors <- cor_auto(big5, detectOrdinal = FALSE) # Correlations: big5Graph <- qgraph(cor(big5),minimum=0.25,groups=big5groups, legend=TRUE,borders=FALSE, title = "Big 5 correlations") # Same graph with spring layout: qgraph(big5Graph,layout="spring") # Same graph with different color scheme: qgraph(big5Graph,posCol="blue",negCol="purple") ### Network analysis ### ### Using bfi dataset from psych ### library("psych") data(bfi) # Compute correlations: CorMat <- cor_auto(bfi[,1:25]) # Compute graph with tuning = 0 (BIC): BICgraph <- qgraph(CorMat, graph = "glasso", sampleSize = nrow(bfi), tuning = 0, layout = "spring", title = "BIC", details = TRUE) # Compute graph with tuning = 0.5 (EBIC) EBICgraph <- qgraph(CorMat, graph = "glasso", sampleSize = nrow(bfi), tuning = 0.5, layout = "spring", title = "EBIC", details = TRUE) # Compare centrality and clustering: centralityPlot(list(BIC = BICgraph, EBIC = EBICgraph)) clusteringPlot(list(BIC = BICgraph, EBIC = EBICgraph)) # Compute centrality and clustering: centrality_auto(BICgraph) 
clustcoef_auto(BICgraph) ### Directed unweighted graphs ### set.seed(1) adj=matrix(sample(0:1,10^2,TRUE,prob=c(0.8,0.2)),nrow=10,ncol=10) qgraph(adj) title("Unweighted and directed graphs",line=2.5) # Save plot to nonsquare pdf file: qgraph(adj,filetype='pdf',height=5,width=10) #### EXAMPLES FOR EDGES UNDER DIFFERENT ARGUMENTS ### # Create edgelist: dat.3 <- matrix(c(1:15*2-1,1:15*2),,2) dat.3 <- cbind(dat.3,round(seq(-0.7,0.7,length=15),1)) # Create grid layout: L.3 <- matrix(1:30,nrow=2) # Different esize: qgraph(dat.3,layout=L.3,directed=FALSE,edge.labels=TRUE,esize=14) # Different esize, strongest edges omitted (note how 0.4 edge is now # just as wide as 0.7 edge in previous graph): qgraph(dat.3[-c(1:3,13:15),],layout=L.3,nNodes=30,directed=FALSE, edge.labels=TRUE,esize=14) # Different esize, with maximum: qgraph(dat.3,layout=L.3,directed=FALSE,edge.labels=TRUE,esize=14,maximum=1) title("maximum=1",line=2.5) qgraph(dat.3[-c(1:3,13:15),],layout=L.3,nNodes=30,directed=FALSE,edge.labels=TRUE, esize=14,maximum=1) title("maximum=1",line=2.5) # Different minimum qgraph(dat.3,layout=L.3,directed=FALSE,edge.labels=TRUE,esize=14,minimum=0.1) title("minimum=0.1",line=2.5) # With cutoff score: qgraph(dat.3,layout=L.3,directed=FALSE,edge.labels=TRUE,esize=14,cut=0.4) title("cut=0.4",line=2.5) # With details: qgraph(dat.3,layout=L.3,directed=FALSE,edge.labels=TRUE,esize=14,minimum=0.1, maximum=1,cut=0.4,details=TRUE) title("details=TRUE",line=2.5) # Trivial example of manually specifying edge color and widths: E <- as.matrix(data.frame(from=rep(1:3,each=3),to=rep(1:3,3),width=1:9)) qgraph(E,mode="direct",edge.color=rainbow(9)) ### Input based on other R objects ### ## pcalg # Example from pcalg vignette: library("pcalg") data(gmI) suffStat <- list(C = cor(gmI$x), n = nrow(gmI$x)) pc.fit <- pc(suffStat, indepTest=gaussCItest, p = ncol(gmI$x), alpha = 0.01) qgraph(pc.fit) ## glasso: # Using bfi dataset from psych: library("psych") data(bfi) cor_bfi <- cor_auto(bfi[,1:25]) # 
Run qgraph: library("glasso") bfi_glasso <- glasso(cor_bfi, 0.1) # Plot: qgraph(bfi_glasso, layout = "spring") } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ Graphs } \keyword{ qgraph } \keyword{ Correlations} qgraph/man/getWmat.Rd0000644000176200001440000000162514430573263014221 0ustar liggesusers\name{getWmat} \alias{getWmat} \alias{getWmat.matrix} \alias{getWmat.data.frame} \alias{getWmat.igraph} \alias{getWmat.qgraph} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Obtain a weights matrix } \description{ This function extracts a weights matrix from various kinds of objects. } \usage{ \method{getWmat}{matrix}(x,nNodes,labels, directed = TRUE,\dots) \method{getWmat}{data.frame}(x,nNodes,labels, directed = TRUE,\dots) \method{getWmat}{igraph}(x,labels,\dots) \method{getWmat}{qgraph}(x,directed,\dots) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ An input object } \item{nNodes}{ Number of Nodes } \item{labels}{ A vector specifying the labels of each node } \item{directed}{ Logical indicating if the graph should be directed } \item{\dots}{ Ignored } } \value{ A weights matrix } \author{ Sacha Epskamp } qgraph/man/plot.qgraph.Rd0000644000176200001440000000215514430573263015047 0ustar liggesusers\name{plot.qgraph} \alias{plot.qgraph} \title{ Plot method for "qgraph" } \description{ Plots an object created by \code{\link{qgraph}}. } \usage{ \method{plot}{qgraph}(x, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ A "qgraph" object } \item{\dots}{ Not used } } \details{ If the result of \code{\link{qgraph}} is stored, such as \code{Graph <- qgraph(...)}, the plot can be recreated in two ways. \code{qgraph(Graph, ...))} reruns \code{qgraph} with the same arguments used in the origina call except those restated in the dots. 
For example \code{qgraph(Graph, shape = "square")} will recreate the same plot but now use square nodes instead of circular. \code{plot(Graph)} will NOT rerun \code{qgraph} but simply plot the qgraph object. This means that now specific graph attributes can be changed before plotting. More specifically, \code{qgraph(Graph)} will base the new plot on the \code{Arguments} element of the \code{qgraph} object and \code{plot(qgraph)} will base the new plot on the \code{graphAttributes} element of qgraph. } \author{ Sacha Epskamp (mail@sachaepskamp.com) } qgraph/man/smallworldIndex.Rd0000644000176200001440000000116714430573263015762 0ustar liggesusers\name{smallworldIndex} \alias{smallworldIndex} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Small-world index of unweighted graph } \description{ Computes the small-world index of an unweighted graph. When the graph is weighted, weights are removed and every nonzero edge weight is set to 1. } \usage{ smallworldIndex(x) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ A qgraph object. } } \references{ Watts, D. J., & Strogatz, S. H. (1998). Collective dynamics of 'small-world' networks. nature, 393(6684), 440-442. } \author{ Sacha Epskamp } qgraph/man/FDRnetwork.Rd0000644000176200001440000000540314430573263014634 0ustar liggesusers\name{FDRnetwork} \alias{FDRnetwork} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Model selection using local False Discovery Rate } \description{ This function is a wrapper around \code{\link[fdrtool]{fdrtool}} to easily compute a correlation or partial correlation network in which all nonsignificant edges are set to zero. } \usage{ FDRnetwork(net, cutoff = 0.1, method = c('lfdr', 'pval', 'qval')) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{net}{ A correlation or partial correlation matrix } \item{cutoff}{ The cutoff value to use. 
The edges of which the value of the first element of \code{method} are higher than the cutoff are removed. Thus, by default, edges with a local false discovery rate of higher than 0.1 are removed from the graph. } \item{method}{The method to use with the cutoff. Can be \code{'lfdr'} for the local false discobvery rate, \code{'pval'} for the p-value of \code{'qval'} for the q-value.} } \details{ \code{method = 'lfdr'} could result in a very sparse network, so also looking at other values is advisable. } \references{ Bernd Klaus and Korbinian Strimmer. (2014). fdrtool: Estimation of (Local) False Discovery Rates and Higher Criticism. R package version 1.2.12. http://CRAN.R-project.org/package=fdrtool } \author{ Sacha Epskamp } \examples{ \dontrun{ ### Using bfi dataset from psych ### library("psych") data(bfi) ### CORRELATIONS ### # Compute correlations: CorMat <- cor_auto(bfi[,1:25]) # Run local FDR: CorMat_FDR <- FDRnetwork(CorMat) # Number of edges remaining: mean(CorMat_FDR[upper.tri(CorMat_FDR,diag=FALSE)]!=0) # None, so might use different criterion: CorMat_FDR <- FDRnetwork(CorMat, method = "pval") # Compare: L <- averageLayout(CorMat, CorMat_FDR) layout(t(1:2)) qgraph(CorMat, layout = L, title = "Correlation network", maximum = 1, cut = 0.1, minimum = 0, esize = 20) qgraph(CorMat_FDR, layout = L, title = "Local FDR correlation network", maximum = 1, cut = 0.1, minimum = 0, esize = 20) # Centrality: centralityPlot(list(cor=CorMat, fdr = CorMat_FDR)) ### PARTIAL CORRELATIONS ### # Partial correlation matrix: library("parcor") PCorMat <- cor2pcor(CorMat) # Run local FDR: PCorMat_FDR <- FDRnetwork(PCorMat, cutoff = 0.1, method = "pval") # Number of edges remaining: mean(PCorMat_FDR[upper.tri(PCorMat_FDR,diag=FALSE)]!=0) # Compare: L <- averageLayout(PCorMat, PCorMat_FDR) layout(t(1:2)) qgraph(PCorMat, layout = L, title = "Partial correlation network", maximum = 1, cut = 0.1, minimum = 0, esize = 20) qgraph(PCorMat_FDR, layout = L, title = "Local FDR partial 
correlation network", maximum = 1, cut = 0.1, minimum = 0, esize = 20) # Centrality: centralityPlot(list(cor=PCorMat, fdr = PCorMat_FDR)) } } qgraph/man/mat2vec.Rd0000644000176200001440000000107414430573263014150 0ustar liggesusers\name{mat2vec} \alias{mat2vec} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Weights matrix to vector } \description{ Converts a weights matrix to a vector of weights. If the matrix is symmetrical only upper triangle values are returned in the vector. } \usage{ mat2vec(x, diag = FALSE, tol = 1e-10) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ A weights matrix } \item{diag}{ Logical: should diagonal values be included? } \item{tol}{ Tolerance level } } \author{ Sacha Epskamp } qgraph/man/as.igraph.Rd0000644000176200001440000000145114430573263014462 0ustar liggesusers\name{as.igraph.qgraph} \alias{as.igraph.qgraph} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Converts qgraph object to igraph object. } \description{ This function converts the output of \code{\link{qgraph}} to an 'igraph' object that can be used in the igraph package (Csardi & Nepusz, 2006) } \usage{ \method{as.igraph}{qgraph}(x, ..., attributes = TRUE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ A \code{"qgraph"} object } \item{\dots}{Not used.} \item{attributes}{ Logical, should graphical attributes also be transferred? } } \references{ Csardi G, Nepusz T (2006). The igraph software package for complex network research, InterJournal, Complex Systems 1695. http://igraph.sf.net } \author{ Sacha Epskamp } qgraph/man/qgraph.layout.fruchtermanreingold.Rd0000644000176200001440000001117514521125565021450 0ustar liggesusers\encoding{UTF-8} \name{qgraph.layout.fruchtermanreingold} \alias{qgraph.layout.fruchtermanreingold} %- Also NEED an '\alias' for EACH other topic documented here. 
\title{qgraph.layout.fruchtermanreingold} \description{ This is a wrapper for the function that returns the x and y coordinates of the graph based on the Fruchterman Reingold algorithm (Fruchterman & Reingold, 1991), which was ported from the SNA package (Butts, 2010). This function is used in \code{\link{qgraph}} and is not designed to be used separately. See details for using constraints in this layout.} \usage{ qgraph.layout.fruchtermanreingold(edgelist, weights=NULL, vcount=NULL, niter=NULL, max.delta=NULL, area=NULL, cool.exp=NULL, repulse.rad=NULL, init=NULL, groups=NULL, rotation=NULL, layout.control=0.5, constraints=NULL, round = TRUE, digits = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{edgelist}{A matrix with on each row the nodes at the start and the node at the end of each edge.} \item{weights}{A vector containing the edge weights.} \item{vcount}{The number of nodes.} \item{niter}{Number of iterations, default is 500.} \item{max.delta}{Maximum displacement, default is equal to the number of nodes.} \item{area}{The area of the plot, default is the square of the number of nodes.} \item{cool.exp}{Cooling exponent, default is 1.5.} \item{repulse.rad}{Repulse radius, defaults to the cube of the number of nodes.} \item{init}{Matrix with two columns and a row for each node containing the initial X and Y positions.} \item{groups}{See \code{\link{qgraph}}} \item{rotation}{See \code{\link{qgraph}}} \item{layout.control}{See \code{\link{qgraph}}} \item{constraints}{A constraints matrix with two columns and a row for each node containing a NA if the node is free or a fixed value for one of the coordinates.} \item{round}{Logical indicating if the initial input should be rounded} \item{digits}{Number of digits to round initial input and displacement in the algorithm to. Defaults to 5. 
This helps prevent floating point disrepancies between different operating systems.} } \details{ All arguments for this function can be passed from \code{\link{qgraph}} to this function by using the 'layout.par' argument, which must be a list containing the arguments. This can be used to constrain the layout in two ways: } \section{Hard constraints}{ By using the 'constraints' argument the X and Y positions of each node can be fixed to a certain value. The 'constraint' argument must be given a matrix with two columns and a row for each node. An NA means that that coordinate for that node is free, and a value means it is fixed to that value.} \section{Soft constraints}{ Soft constraining can be done by varying the 'max.delta' argument. This can be a single number, but also a vector containing the maximum displacement per step for each node. The default value is the number of nodes, so by setting this to a lower value for some nodes the node won't move so much. Use this in combination with the 'init' argument to make sure nodes don't move too much from their initial setup. This can be useful when adding a new node to an existing network and if you don't want the network to completely change.} \references{ Sacha Epskamp, Angelique O. J. Cramer, Lourens J. Waldorp, Verena D. Schmittmann, Denny Borsboom (2012). qgraph: Network Visualizations of Relationships in Psychometric Data. Journal of Statistical Software, 48(4), 1-18. URL http://www.jstatsoft.org/v48/i04/. Carter T. Butts (2010). sna: Tools for Social Network Analysis. R package version 2.2-0. http://CRAN.R-project.org/package=sna Fruchterman, T. & Reingold, E. (1991). Graph drawing by force-directed placement. Software - Pract. Exp. 21, 1129?1164. } \author{ Sacha Epskamp (mail@sachaepskamp.com) } \seealso{ \code{\link{qgraph}} } \examples{ \dontrun{ # This example makes a multipage PDF that contains images # Of a building network using soft constraints. # Each step one node is added with one edge. 
The max.delta # decreases the longer nodes are present in the network. pdf("Soft Constraints.pdf",width=10,height=5) adj=adjO=matrix(0,nrow=3,ncol=3) adj[upper.tri(adj)]=1 Q=qgraph(adj,vsize=3,height=5,width=10,layout="spring", esize=1,filetype='',directed=T) cons=Q$layout for (i in 1:20) { x=nrow(adj) adjN=matrix(0,nrow=x+1,ncol=x+1) adjN[1:x,1:x]=adj consN=matrix(NA,nrow=x+1,ncol=2) consN[1:x,]=cons[1:x,] layout.par=list(init=rbind(cons,c(0,0)), max.delta=10/(x+1):1,area=10^2,repulse.rad=10^3) y=sample(c(x,sample(1:(x),1)),1) adjN[y,x+1]=1 Q=qgraph(adjN,Q,layout="spring",layout.par=layout.par) cons=Q$layout adj=adjN } dev.off() } } qgraph/man/clustcoef_auto.Rd0000644000176200001440000001241714430573263015631 0ustar liggesusers\name{clustcoef_auto} \alias{clustcoef_auto} \alias{clustWS} \alias{clustZhang} \alias{clustOnnela} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Local clustering coefficients. } \description{ Compute local clustering coefficients, both signed and unsigned and both for weighted and for unweighted networks. } \usage{ clustcoef_auto(x, thresholdWS = 0, thresholdON = 0) clustWS(x, thresholdWS=0) clustZhang(x) clustOnnela(x, thresholdON=0) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{An undirected graph. Can be a \code{qgraph} object, an \code{igraph} object, an adjacency matrix, a weight matrix and an edgelist, or a weighted edgelist.} \item{thresholdWS}{ The threshold used to binarize a weighted network \code{x} to compute the binary clustering coefficients \code{clustWS} and \code{signed_clustWS}. Edges with weights lower than \code{thresholdWS} in absolute value are zeroed. For unweighted networks, \code{thresholdWS = 0} is the suggested value. } \item{thresholdON}{ In the computation of Onnela's clustering coefficient \code{clustOnnela}, edge of weights lower than \code{thresholdON} in absolute value are excluded. 
The value \code{thresholdON = 0} (i.e., no edge is excluded) is generally suggested also for weighted networks. } } \details{ \code{clustWS} computes the clustering coefficient for unweighted networks introduced by Watts & Strogatz (1998) and the corresponding signed version (Costantini & Perugini, in press). \code{ClustZhang} computes the clustering coefficient for weighted networks introduced by Zhang & Horvath (2005) and the corresponding signed version (Costantini & Perugini, in press). \code{clustOnnela} computes the clustering coefficient for weighted networks introduced by Onnela et al. (2005) and the corresponding signed version (Costantini & Perugini, in press). \code{clustering_auto} automatically recognizes the kind of the input network \code{x} (weighted vs. unweighted, signed vs. unsigned) and computes a subset of indices according to the kind of the network: signed indices are not computed for unsigned networks and weighted indices are not computed for unweighted networks. However the unsigned indices are computed for signed networks, by considering the absolute value of the weights, and the unweighted indices are computed for weighted networks, after a binarization according to the parameter \code{thresholdWS}. \code{clustering_auto} computes also the weighted clustering coefficient by Barrat et al. (2004), relying on function \code{\link[igraph]{transitivity}} from package \code{\link[igraph]{igraph}}. For the computation of the local clustering coefficient, a node must have at least two neighbors: for nodes with less than two neighbors \code{NaN} is returned. } \value{A dataframe that includes one or more of the following indices. 
\item{clustWS}{The Watts & Strogatz's (1998) unweighted clustering coefficient} \item{signed_clustWS}{The signed version of the Watts & Strogatz's clustering coefficient} \item{clustZhang}{The Zhang & Horvath's (2005) weighted clustering coefficient} \item{signed_clustZhang}{The signed version of the Zhang & Horvath's (2005) clustering coefficient} \item{clustOnnela}{The Onnela et al.'s (2005) clustering coefficient} \item{signed_clustOnnela}{The signed version of the Onnela et al.'s (2005) clustering coefficient} \item{clustBarrat}{The Barrat et al.'s (2004) weighted clustering coefficient} } \references{ Barrat, A., Barthelemy, M., Pastor-Satorras, R., & Vespignani, A. (2004). The architecture of complex weighted networks. In Proc. Natl. Acad. Sci. USA 101 (pp. 3747-3752). Costantini, G., Perugini, M. (in press), Generalization of Clustering Coefficients to Signed Correlation Networks Langfelder, P., & Horvath, S. (2008). WGCNA: an R package for weighted correlation network analysis. BMC Bioinformatics, 9, 559. Onnela, J. P., Saramaki, J., Kertesz, J., & Kaski, K. (2005). Intensity and coherence of motifs in weighted complex networks. Physical Review E, 71(6), 065103. Watts, D. J., & Strogatz, S. H. (1998). Collective dynamics of "small-world" networks. Nature, 393(6684), 440-442. Zhang, B., & Horvath, S. (2005). A general framework for weighted gene co-expression network analysis. Statistical Applications in Genetics and Molecular Biology, 4(1). } \author{ Giulio Costantini (giulio.costantini@unimib.it), Sacha Epskamp (mail@sachaepskamp.com) } \note{ Part of the code has been adapted from package \code{WGCNA} (Langfelder & Horvath, 2008). } \section{Warning}{ The function requires an undirected network. To convert a directed network to undirected use for instance function \code{\link{upper.tri}} (see examples). 
} \seealso{ \code{\link{centrality_auto}} } \examples{ set.seed(1) # generate a random (directed) network: net_ig <- igraph::erdos.renyi.game(n=8, p.or.m=.4, type="gnp", directed=TRUE) # convert it to an adjacency matrix: net <- as.matrix(igraph:::get.adjacency(net_ig, type="both")) # convert it to a signed and weighted network: net <- net*matrix(rnorm(ncol(net)^2), ncol=ncol(net)) # make it undirected: net[upper.tri(net)] <- t(net)[upper.tri(net)] clustcoef_auto(net) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{signed} \keyword{weighted} \keyword{clustering}% __ONLY ONE__ keyword per line qgraph/DESCRIPTION0000644000176200001440000000303014521151262013235 0ustar liggesusersPackage: qgraph Type: Package Title: Graph Plotting Methods, Psychometric Data Visualization and Graphical Model Estimation Version: 1.9.8 Authors@R: c( person("Sacha", "Epskamp", email = "mail@sachaepskamp.com",role = c("aut", "cre")), person("Giulio", "Costantini", role = c("aut")), person("Jonas", "Haslbeck", role = c("aut")), person("Adela", "Isvoranu", role = c("aut")), person("Angelique O. J.", "Cramer", role = c("ctb")), person("Lourens J.", "Waldorp", role = c("ctb")), person("Verena D.", "Schmittmann", role = c("ctb")), person("Denny", "Borsboom", role = c("ctb"))) Maintainer: Sacha Epskamp Depends: R (>= 3.0.0) Imports: Rcpp (>= 1.0.0), methods, grDevices, psych, lavaan, plyr, Hmisc, igraph, jpeg, png, colorspace, Matrix, corpcor, reshape2, ggplot2, glasso, fdrtool, gtools, parallel, pbapply, abind ByteCompile: yes Description: Fork of qgraph - Weighted network visualization and analysis, as well as Gaussian graphical model computation. See Epskamp et al. (2012) . 
BugReports: https://github.com/SachaEpskamp/qgraph License: GPL-2 LazyLoad: yes LinkingTo: Rcpp Suggests: BDgraph, huge NeedsCompilation: yes Packaged: 2023-11-03 08:14:42 UTC; sachaepskamp Author: Sacha Epskamp [aut, cre], Giulio Costantini [aut], Jonas Haslbeck [aut], Adela Isvoranu [aut], Angelique O. J. Cramer [ctb], Lourens J. Waldorp [ctb], Verena D. Schmittmann [ctb], Denny Borsboom [ctb] Repository: CRAN Date/Publication: 2023-11-03 11:00:02 UTC qgraph/src/0000755000176200001440000000000014521125762012330 5ustar liggesusersqgraph/src/layout_rcpp.cpp0000644000176200001440000000647214430573263015410 0ustar liggesusers#include using namespace Rcpp; #include #include // [[Rcpp::export]] NumericMatrix qgraph_layout_Cpp( int pniter, int pvcount, int pecount, NumericVector maxdelta, double parea, double pcoolexp, double prepulserad, IntegerVector Ef, /* Edges from */ IntegerVector Et, /*Edges t0*/ NumericVector W, NumericVector xInit, NumericVector yInit, LogicalVector Cx, LogicalVector Cy, int digits ) { /* Calculate a two-dimensional Fruchterman-Reingold layout for (symmetrized) edgelist matrix d. Positions (stored in (x,y)) should be initialized prior to calling this routine. 
*/ int n = pvcount; int m = pecount; double frk; double ded; double xd; double yd; double rf; double af; int i; int j; int k; int l; int niter = pniter; //double maxdelta; double area = parea; double coolexp = pcoolexp; double repulserad = prepulserad; /*Allocate memory for transient structures*/ // dx=(double *)R_alloc(n,sizeof(double)); // dy=(double *)R_alloc(n,sizeof(double)); // t=(double *)R_alloc(n,sizeof(double)); // Rcpp way: NumericVector dx(n); NumericVector dy(n); NumericVector t(n); // Copy xIint and yInit: NumericVector x(n); NumericVector y(n); for (i = 0; i < n; i++){ x[i] = xInit[i]; y[i] = yInit[i]; } frk=sqrt(area/(double)n); /*Run the annealing loop*/ for(i=niter;i>=0;i--){ /*Clear the deltas*/ for(j=0;j 0.000001) { xd/=ded; /*Rescale differences to length 1*/ yd/=ded; } af=ded*ded/frk*W[j]; dx[k]-=xd*af; /*Add to the position change vector*/ dx[l]+=xd*af; dy[k]-=yd*af; dy[l]+=yd*af; } /*Dampen motion, if needed, and move the points*/ for(j=0;jt[j]){ /*Dampen to t*/ ded=t[j]/ded; dx[j]*=ded; dy[j]*=ded; } if (!Cx[j]){ x[j]+=Rf_fround(dx[j],digits); /*Update positions (correcting for floating point errors)*/ } if (!Cy[j]){ y[j]+=Rf_fround(dy[j],digits); } } } NumericMatrix Layout(n,2); // Fill layout: for (i=0;i do not edit by hand // Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 #include using namespace Rcpp; #ifdef RCPP_USE_GLOBAL_ROSTREAM Rcpp::Rostream& Rcpp::Rcout = Rcpp::Rcpp_cout_get(); Rcpp::Rostream& Rcpp::Rcerr = Rcpp::Rcpp_cerr_get(); #endif // qgraph_layout_Cpp NumericMatrix qgraph_layout_Cpp(int pniter, int pvcount, int pecount, NumericVector maxdelta, double parea, double pcoolexp, double prepulserad, IntegerVector Ef, /* Edges from */ IntegerVector Et, /*Edges t0*/ NumericVector W, NumericVector xInit, NumericVector yInit, LogicalVector Cx, LogicalVector Cy, int digits); RcppExport SEXP _qgraph_qgraph_layout_Cpp(SEXP pniterSEXP, SEXP pvcountSEXP, SEXP pecountSEXP, SEXP maxdeltaSEXP, SEXP pareaSEXP, SEXP pcoolexpSEXP, SEXP 
prepulseradSEXP, SEXP EfSEXP, SEXP EtSEXP, SEXP WSEXP, SEXP xInitSEXP, SEXP yInitSEXP, SEXP CxSEXP, SEXP CySEXP, SEXP digitsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type pniter(pniterSEXP); Rcpp::traits::input_parameter< int >::type pvcount(pvcountSEXP); Rcpp::traits::input_parameter< int >::type pecount(pecountSEXP); Rcpp::traits::input_parameter< NumericVector >::type maxdelta(maxdeltaSEXP); Rcpp::traits::input_parameter< double >::type parea(pareaSEXP); Rcpp::traits::input_parameter< double >::type pcoolexp(pcoolexpSEXP); Rcpp::traits::input_parameter< double >::type prepulserad(prepulseradSEXP); Rcpp::traits::input_parameter< IntegerVector >::type Ef(EfSEXP); Rcpp::traits::input_parameter< /* Edges from */ IntegerVector >::type Et(EtSEXP); Rcpp::traits::input_parameter< /*Edges t0*/ NumericVector >::type W(WSEXP); Rcpp::traits::input_parameter< NumericVector >::type xInit(xInitSEXP); Rcpp::traits::input_parameter< NumericVector >::type yInit(yInitSEXP); Rcpp::traits::input_parameter< LogicalVector >::type Cx(CxSEXP); Rcpp::traits::input_parameter< LogicalVector >::type Cy(CySEXP); Rcpp::traits::input_parameter< int >::type digits(digitsSEXP); rcpp_result_gen = Rcpp::wrap(qgraph_layout_Cpp(pniter, pvcount, pecount, maxdelta, parea, pcoolexp, prepulserad, Ef, Et, W, xInit, yInit, Cx, Cy, digits)); return rcpp_result_gen; END_RCPP } static const R_CallMethodDef CallEntries[] = { {"_qgraph_qgraph_layout_Cpp", (DL_FUNC) &_qgraph_qgraph_layout_Cpp, 15}, {NULL, NULL, 0} }; RcppExport void R_init_qgraph(DllInfo *dll) { R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } qgraph/NEWS0000644000176200001440000013132114521125647012243 0ustar liggesusersChanges in Version 1.9.8 - Fixed some encoding issues Changes in Version 1.9.6 - Fixed a bug where rescale does not work with only one node - Fixed a bug where minimum > cut might lead to edges not being 
plotted Changes in Version 1.9.5 - Removed z-score warning ?centralityPlot Changes in Version 1.9.4 - Small fix for CRAN Changes in Version 1.9.3 - Fixed an issue with cor_auto when using FIML for missing data handling. Thanks to Kai Nehler. Changes in Version 1.9.2 - Fixed another bug in checking input class of qgraph.loadings Changes in Version 1.9.1 - Fixed a bug in checking input class of qgraph.loadings Changes in Version 1.9 - Removed unused dplyr import - Fixed a bug with manual color assignment - Fixed an issue with 'centrality_auto' on M1 chipped Mac computers Changes in Version 1.8 - The 'centralityPlot' and 'clusteringPlot' functions now use 'raw0' as default for the 'scale' argument, meaning that centrality and clustering coefficients are no longer standardized to z-scores by default. Changes in Version 1.7.1 - Fixed a bug with the flow() function - Added theme = "pride" Changes in Version 1.7.0 - The 'centrality()' function now includes R-squared (predictability) - 'qgraphAnnotate' has been removed due to CRAN removal of sendplot - Fixed a bug when using edges colored as probabilities with more than 20 nodes - the 'probCol' argument now defaults to "black" - Removed several dependencies - Removed 'as_ggraph' to reduce dependencies Changes in Version 1.6.9 - Fixed a bug where using qgraph on the output of qgraph did not use all arguments correctly Changes in Version 1.6.8 - Fixed a bug with legend not showing when edge labels are used. 
Changes in Version 1.6.7 - node labels are now colored white for lighter nodes with theme = "gray" - Added argument label.color.split Changes in Version 1.6.6 - Fixed a bug where the diagonal of undirected graphs was counted twice in getWmat - Fixed a bug with bnlearn as input Changes in Version 1.6.5 - Removed findGraph Changes in Version 1.6.4 - Added te as_ggraph function to transform a qgraph object into a ggraph object - forcePD now defaults to FALSE in cor_auto - Added the argument edge.label.margin to qgraph - Fixed a bug with fade not working as matrix/vector when edge.colors are ised Changes in Version 1.6.3 - Updated warning message for when the densest network is selected. - Added an option countDiagonalPars to ggmFit to count diagonal of the network as parameters, now set to TRUE by default. - Added support for drawing confidence regions using pie charts with the following arguments: - pieCImid, pieCIlower, pieCIupper, pieCIpointcex, pieCIpointcex - Fixed a bug with clusteringTable and clusteringPlot Changes in Version 1.6.2 - Fixed a bug in centralityPlot Changes in Version 1.6.1 - EBIC computation now uses the number of variables in the extra penalty, as opposed to the number of cases. - Removed old spelling errors left in for backward competability. The argument 'graph' must now be "cor" or "pcor" - Added three more AIC alternatives (thanks to Erikson Kaszubowski) Changes in Version 1.6 o Major changes: o Removed several dependencies in order to start taking qgraph out of dependency hell: o The following functions have been *removed*: - qgraph.pca - qgraph.cfa - qgraph.sem - qgraph.semModel - qgraph.lavaan - qgraph.panel - qgraph.svg - qgraph.efa o The following functionality has been *removed* from qgraph: - tooltips - overlay - overlaySize o The Fruchterman Reingold layout has been changed. These changes *will* lead to a different layout to be returned now, but should prevent such disrepancies in the future. 
- The function has been translated from C to C++ using Rcpp - Results are now rounded in the algorithm. This should lead to more consistency in its results o centralityPlot now only shows node degree or strength by default o inlcude = "all" is now supported in centralityPlot to show all available centrality measures o Minor changes: o Fixed a bug in ggmFit, leading to the number of parameters and estimated graph structure to occasionally be wrong o Added 'res' to the allArgs list o Added 'node.label.offset' and 'node.label.position' arguments to control the position of node labels (thanks to mihaiconstantin) o Some small bugfixes o Fixed a bug leading to centralityPlot occasionally crashing o centralityPlot and clusteringPlot now support empty networks, and give an informative error when used on single-node networks o theme = "neon" is now supported Changes in Verson 1.5.1 o Added a criterion argument to ggmModSelect to allow for other criteria to be used. o The centrality help page now correctly describes "InExpectedInfluence" and "OutExpectedInfluence" Changes in Verson 1.5 o Added the 'flow' function to make flow plots o Fade now accepts numeric values, setting the exact fading (per edge) o Added theme = "gimme", defaulting to posCol = "red", negCol = "blue", parallelEdge = TRUE and fade = FALSE o Added the argument 'title.cex' o centrality() now also returns InExpectedInfluence and OutExpectedInfluence o The 'include' argument in centralityPlot and clusteringPlot can now also be used to re-order the facets of the plot. o Added the orderBy argument to centralityPlot() o Added the 'threshold' argument to 'EBICglasso', which can be used to threshold weak edges, ensuring high specificity. 
o EBICglasso will now return a warning for when a dense network is selected o Added the 'ggmModSelect' function, which can be used for unregularized GGM model search o ggmFit now treats absolute edges under sqrt(.Machine$double.eps) as zeroes o Fixed a bug where graph = "pcor" with threshold = "sig" required column/row names for the input correlation matrix Changes in Version 1.4.4 o Fixed a bug in EBICglasso in which only nLambda = 100 was supported o EBICglasso now gives a warning if the densest (lowest lambda) network is selected o EBICglasso now gives a message if the empty network is selected o qgraph() now supports 'gamma' as alias for 'tuning' o qgraph() now has a lambda.min.ratio argument to control the same argument in EBICglasso o Fixed a bug in which pie charts were not drawn with > 50 nodes o Re-enabled support for BDgraph o qgraph no longer warns for when using hidden arguments in semPlot Changes in Version 1.4.3 o Fixed a bug in which 'curveScaleNodeCorrection' defaulted to FALSE instead of TRUE o qgraph() now gives a warning if the user uses a not-documented argument. 
o Fixed a bug in computing baseline DF of ggmFit o ggmFit now has an argument for number of parameters o Fixed a bug in computing baseline chi-square of ggmFit o ggmFit now scales pcor matrix to match diagonal of var-cov matrix rather than diagonal of inverse var-cov matrix o Using EBICglasso with returnAllResults now returns 'optwi' as well Changes in Version 1.4.2 o centrality_auto no longer outputs igraph messages to the console o Added theme option "gray" for grayscale figures o Automatically makes negative edges dashed o Added 'negDashed' argument to make negative edges dashed o bootnet getWmat method now takes signed and weighted into account o Centrality functions now have arguments weighted and signed to compute centrality based on the signed or unweighted graph o Fixed a bug with refit = TRUE Changes in Version 1.4.1 o Added 'curveScaleNodeCorrection' argument o Added 'palette' and 'theme' functions o Colorblind mode is now supported with theme = "colorblind" o label.color can now be `NA`, which will make the label white if the average of RGB colors of the node is less than 0.25 * 255 o Added ggmFit function (moved from lvnet), which computes fit measures of a GGM network based on Lavaan. Results are comparable to lavaan if lavaan results are estimated on a covariance matrix. o Added 'refit' argument to 'EBICglasso' and 'qgraph'. If set to TRUE the optimal EBIC glasso network is refitted without LASSO regularization Changes in Version 1.4.0 o Added the 'pathways' function to highlight shortest paths between sets of nodes o In collaboration with Adela M. Isvoranu o Added functionality to draw pie graphs as node shape. 
o New arguments: 'pie', 'pieColor', 'pieColor2', and 'pieBorder' o In collaboration with Jonas Haslbeck o The label.cex argument can now be used in combination with label.scale = FALSE o Edge and node labels are now properly centered o EBICglasso now acts as alias for graph argument o The 'graph' argument in 'qgraph' now gives an error if it is assigned with an unsupported option Changes in Version 1.3.5 o The label.cex argument now defaults to 1 if label.scale = FALSE o The label.scale.equal argument now can be set to TRUE to make all labels scale equally to the largest cex such that the longest label fits. o Can alternatively be a factor or numeric vector indicating which groups of nodes whould have the same label shape o Fixed a bug with betweenness computation using pkg = "igraph" argument in centrality() o The 'centralityPlot' and 'clusteringPlot' now use an argument 'scale' to set the scale of the x-axis, and print a message when this scale is z-scores or relative. o The 'shape' argument can now be used with subplots o getWmat now supports 'bootnetResult' objects o Fixed a bug with as.igraph not working if there is only 1 edge o Added 'smallWorldIndex' function to compute smallworldness of unweighted graph o Added arguments 'label.fill.vertical' and 'label.fill.horizontal' to 'qgraph' o Apparently skipped over version 1.3.4 Changes in Version 1.3.3 o The 'lambda.min.ratio' of 'EBICglasso' now defaults to 0.01 instead of 0.1, similar to earlier versions of qgraph. This will cause EBICglasso and qgraph(..., graph = "glasso") to potentially return less sparse models! o Added 'countDiagonal' argument to qgraph, EBICglasso and EBIC, so that behavior of older versions of qgraph can be mimicked. o Fixed a bug when computing average weights matrix with network that only has negative edges o Added 'means', 'SDs' and 'meanRange' arguments to qgraph(). These can be used to plot bars inside nodes indicating the means and variances of the nodes. 
o Added two new options for the 'legend.mode' argument in qgraph(): "style1" (default) and "style2", which use information from both the 'groups' and the 'nodeNames' arguments to plot the legend. Changes in Version 1.3.2 o Solved an issue with bg = TRUE resulting in small white borders around background pixels o centralityTable and clusteringTable now appropriately call melt from the reshape2 namespace o cor_auto now suppresses warnings o Added 'background' argument to qgraph() o qgraphGUI now links to shiny app o centrality() can now use igraph for faster computations o qgraphAnnotate now works on Windows o Added the makeBW function to plot a network that can be understood also in black and white or grayscale. Thanks to Giulio Costantini! Changes in Version 1.3.1 o 'sendplot' package moved from imports to suggests list. o Added the function 'qgraphMixed' which is designed to plot graphs that contain both undirected and directed edges. o Added the 'repulsion' argument to averageLayout o Added the 'verbose' argument to cor_auto o Added the 'penalizeMatrix' argument to EBICglasso o Fixed a bug with non-ACSII labels o Fixed bug in clusterinPlot when sd = 0 Changes in Version 1.3 o New Features o The 'minimum' argument can now be set to "sig" when graph = "association" (correlation network) or graph = "concentration" (partial correlation network). This will set minimum to the value from which correlations become significant. Use 'alpha' and 'bonf' arguments to control significance level. Note that this value is just as arbitrary as any other value for 'minimum'. Also, as usual with the 'minimum' argument the result is only graphical; edges are not omitted but merely not shown. This is important in calculating centrality measures. o Added the 'threshold' argument which can be used to REMOVE edges under a certain threshold. This argument can also be used to remove non-significant edges in correlation and partial correlation networks. 
Note that this is different from 'minimum' in that 'minimum' only works graphically and does not display edges while 'threshold' literally removes them. Thus, 'threshold' can be used for model selection where 'minimum' can not, and they result in different spring layouts and centrality measures. o Added the 'repulsion' argument, which is a shortcut to specify layout.par = list(repulse.rad = nNodes^(repulsion * 3)). This argument can be used to easily scale the repulsion used by the fruchterman reingold algorithm o Added the 'findGraph' function that uses brute, stepup and stepdown model search to find an optimal correlation or partial correlation network according to EBIC. This is only recommended for small networks; for larger network EBICglasso should be preferred. o Added qgraphAnnotate function to create a HTML file with tooltips. o Major changes o The EBIC-glasso method no longer penalizes the diagonal of the precision matrix nor does it include these diagonal values in the number of nonzero parameters used in the EBIC. This results in DIFFERENT graphs, that are overall sparser. o The default for the 'graph' argument is now "default" rather than "association". "association" now checks if the input is a correlation matrix, and will run cov2cor. This means that covariance matrices can now be used for "association", "concentration" and "glasso" graphs. o The 'graph' argument can now be assigned "cor" or "pcor" as alternatives to "association" and "concentration" respectively. o Checks for positive definiteness are now built into qgraph(), cor_auto() and EBICglasso(). The nearPD function of Matrix will be used when appropriate. o 'minimum' now defaults to 0 regardless of graph size. o centralityPlot/Table and clusteringPlot/Table now return standardized measures by default. o cor_auto() now uses pairwise observations rather than listwise. The "missing" argument controls how missing data is handled. 
o EBICglasso gamma now defaults to 0.5 to be in line with qgraph(..., graph = "glasso") o Minor changes / bug fixes: o Added 'noPar' argument, which disables the internal par() call. This is needed when using qgraph in the subplot functionality. o Fixed a bug causing tooltips to be unusable with filetype = "svg" o Fixed a bug using an expression for labels. o 'cut' can now be set to NA to mimic cut=0 o Several small bugfixes o clustcoef_auto now returns labels as rownames Changes in Version 1.2.5 o Changes in default behavior: o The default for the 'cut' argument has significantly changed. Instead of using a static value of 0.3 the cut value is now adapted to the edge weights present in the graph. It is now chosen such that the number of edges above cut is equal to two times the number of nodes, or 25% of all edges. o esize now makes edges larger in small graphs and about the same in larger graphs. o vsize now makes nodes larger in small graphs and smaller in large graphs. o asize is now a bit larger in small graphs. o Graphs should now be somewhat zoomable in flexible graphical devices such as RStudio zoom button. However, this is still not recommended. See 'usePCH' argument documentation for details. o New features: o Added the function 'EBICglasso' which will compute a sparse partial correlation matrix using glasso and EBIC criterium. o Added option to 'graph' argument in qgraph(). Setting graph = "glasso" will automatically call EBICglasso. This makes use of the new 'sampleSize' and 'tuning' parameters. o Packages glasso and huge are now supported. o Added 'centralityPlot', 'centralityTable', 'clusteringPlot' and 'clusteringTable' functions to facilitate interpretation of centrality and clustering statistics. o Added 'averageLayout' to compute the average layout of multiple graphs. o Added cor_auto function which automatically detects ordinal variables in a dataset and computes an appropriate correlation matrix using Lavaan's lavCor. 
o Added VARglm function for estimating VAR models using GLM. o Changes: o Updated qgraph help page and examples o Bug fixes: o Fixed a bug where curved edges became indented between two large, close nodes. o Fixed a bug when using overlay with groups consisting of only one node. Changes in Version 1.2.3 New features: o New graph analysis functions added by Giulio Costantini: o 'centrality_auto' o 'clustcoef_auto' o 'smallworldness' o 'clustOnnela', 'clustZhang', 'clustWS', 'mat2vec' o Added 'parallelEdge' and 'parallelAngle' arguments. Set parallelEdge = TRUE to obtain parallel straight edges rather than curved edges. o Added 'edgeConnectPoints' argument. This argument specifies the point for each edge to which it connects to a node, in radians. Can be either a matrix with a row for each edge and two columns: The first column indicates the connection point of the source of the edge and the second column specifies the connection point of the destination of the edge. Can also be an array with a row and column for each node two slices which indicate the source and destination of the edge connecting the two nodes o 'arrowAngle' now defaults to pi/6 for weighted graph instead of pi/4. o Edges are now drawn only to the start of arrows instead of to the edge of nodes regardless of if there is an arrow. o Added the argument 'label.font' which can be used to specify the font of node labels. o Added the argument 'edge.label.font' which can be used to specify the font of edge labels. o Added the argument 'font' which can be used to set both node and edge label fonts. o Added the 'edge.width' argument that scales both 'esize' and 'asize'. Making it easier to make both edges and arrows larger. o Added 'node.width' and 'node.height' arguments that scales 'vsize' and 'vsize2'. o BDgraph is now supported. o bnlearn is now properly supported o Added 'title' argument. 
o Added preExpression and postExpression arguments Changes: o Added internal function 'drawEdge' which draws the edge. This function can be used similarly to arrows() in any plot. o Edges representing p-values of exactly 0 in mode="sig" graphs are now represented by the strongest possible positive edge. o The functionality of using * in labels to plot the label in symbol font has been removed as this is now possible by using either an expression as label or by setting the font manually to 5. o Graphical arguments in the qgraph help page have been grouped in subcategories. o The midpoint of curved edges is now based on the centers of nodes o qgraph.animate now returns a list of graph objects o qgraph.animate now smoothes layout position per frame by default Bugfixes: o XKCD easter egg works once again o Fixed a bug where edge labels could not be assigned using a matrix. Changes in Version 1.2.3 o Maintainer e-mail changed to mail@sachaepskamp.com to be in line with other packages maintained by me o Major changes: o centrality() now also makes weights positive in computing in and out degree o bnlearn and bn.strength classes are now supported o Added edge.label.position argument. 
If an edge has a label its source is now rescaled when its destination is rescaled to keep the middle of the edge correctly o Minor changes: o Graphs now only default to unweighted if all entries are either 0 or 1 o residScale is now normalized o border width can now be varied per node o Evaluation of subplots now uses the global environment for lookups o The 'labels' argument can now be used with pcalg input o Background color can now be transparent o Added subpars argument to pass 'par' arguments to subplots o Added argument 'subplotbg' to control the background of subplots o numeric edge labels now converted to characters o Centering with aspect is TRUE now only looks at the range of x and y coordinates, placing the graph more to the center o Self-loops are now removed in centrality() o Machine tolerance is now taken into account when testing if an input matrix is symmetrical. Because of this, correlation matrices computed with cov2cor no longer need to be rounded o If the par("bg") color is "transparent" the background is set to "white" instead of "transparent" o filetype argument no longer supports 'x11' o Bug fixes: o Edge labels of '' will now not plot a background. o Fixed a bug where 'lty' was not working correctly when edge lists with weights of zero were used as input. Reported by Laura Bringmann. o as.igraph works again o Fixed a bug causing background on edge labels not to be plotted when edge.labels = TRUE o Fixed bug with edge.labels = FALSE o Node color on transparent background now defaults to white instead of transparent o Fixed a bug in qgraph.animate where no default arguments could be passed anymore o Fixed a bug where edge labels could not be expressions o curve entries for edges of weight 0 are now correctly removed Changes in Version 1.2.2 o New features o Added functionality to draw bars inside nodes (e.g., how semPlot shows thresholds). 
Note that this is not intended to be used as shading lines, which will be added in a future version. See the qgraph help page for a description of the new arguments 'bars', 'barLength', 'barColor', 'barSide' and 'barsAtSide'. o Added 'curveScale' argument: Logical, should curve scale with distance between nodes. Defaults to TRUE. If FALSE, the curve can be exactly determined. Recommended to set to TRUE for graphs and FALSE for diagrams. o Using a matrix with relative positioning of nodes (grid layout) can now also be assigned characters of the node labels. e.g., assigning matrix(LETTERS[1:4],2,2) to 'layout' will place node labeled "A" at top left and "D" at bottom right. Use NA rather than 0 in all other positions. o Added 'pastel' argument for pastel colors o Added 'rainbowStart' argument to control the start of rainbow functions o Added 'nodeNames' argument, which can be used to make a legend based on node labels for every node. o Added 'legend.mode' argument to indicate if legend should be based on groups or node names. o Added support for custom node shapes using polygons. The polygonList argument can be used to add polygon shapes to the lookup table of the shape argument. o By default this list includes the shapes "ellipse", "heart" and "star" o Major changes o INTERNAL CHANGE IN QGRAPH o A change has been made to the qgraph internal representation. First, qgraph() computed quite some parameters for the graph, then used them to plot and finally created an output model containing the arguments that allowed the plot to be plotted again. Now, qgraph() creates a model, and plot.qgraph() plots it (by default, plot.qgraph is now called at the end of qgraph()). o Because of this, every aspect of a graph can now be manually changed between running qgraph and plotting the output (use DoNotPlot to ignore plotting the first run). 
o Currently this is implemented simply by storing all values in qgraph() that are used in plot.qgraph, in a future function some of the arguments from qgraph will be moved to plot.qgraph to make a more natural distinction between plotting arguments (such as filetype) and arguments that control the graph. o Minor changes o minimum is now taken into account in automatically curving edges. Edges are only automatically curved if multiple edges between two nodes actually show up in the graph. o Node and edge labels can now be assigned using a list. o graphNEL objects can now result in undirected graphs. o pcalg objects will now result in mixed graphs. o Legend placement now takes the 'mar' argument into account. o The 'aspect' argument now also works for non-square plotting areas o The default rotation of selfloops now points away from the center of a group only if that group contains at least two nodes (rather than one) o Bugfixes o Fixed a bug where tooltips could not be plotted with filetype='svg'. Tooltips now no longer require labels as well. o Using the filetype argument in combination with a legend (or filetype='svg') will now once again create a larger plotting window such that the graph is plotted in a square window. o Legends are now also plotted when plotting a graph of significance values with no groups specified. Changes in Version 1.2.1 o Curved edges can now be displayed as mostly straight edges that run parallel. This can be done using the new argument 'curvePivot'. See documentation of 'qgraph' for more information. o Shape of curved edges can now be controlled with 'curveShape' and 'curvePivotShape'. o The start of edges is now also reset to the edge of nodes if the destination of the edge is also reset. This will cause bidirectional edges to look the same as directed edges. o Fixed a bug where graphs with a single edge were unable to plot edge labels. o jpeg package is now used to read jpeg files. 
Changes in Version 1.2 o NEW MAJOR FEATURES: o Support added to include R plots as nodes. This can be done with the 'subplots' argument, which takes a list of R expressions that create a plot when evaluated. The 'subplot' function in the R package Hmisc is used to create the subplots. o Added the 'images' argument which can be used to plot png and jpeg files as node images. o Added knots, knot.size, knot.color, knot.borders, knot.border.color and knot.border.width arguments that allow tying edges together at their center. o igraph support added: o the as.igraph function converts a qgraph object to an igraph object o igraph layout functions can now be assigned to the layout argument in qgraph(). The layout.par function can be used to specify arguments to the function. o Edge color specification o Added argument for controlling default positive edge color: posCol o Added argument for controlling default negative edge color: negCol o Added argument for controlling default unweighted edge color: unCol o Custom colors now fade to the background color if fade=TRUE. o Edge colors can now be overwritten for specific edges. If edge.color is a vector, NA indicate the edge color is determined by default o Defaults added. The options "qgraph" can be assigned a list with default arguments for qgraph(). o qgraph plots are now normalized across different sizes of the plots if the argument 'normalize' is set to TRUE (the default). Many arguments are normalized to the diagonal of the plot region so that the same result is obtained o NEW MINOR FEATURES: o 'shape' can now be set to 'rectangle' o Added 'vsize2' argument to control the vertical size of nodes if shape is 'rectangle'. 
o Added 'label.prop' argument to control the relative width of labels to the width of nodes o Labels are no longer coerced to characters, this means that expressions can now also be used for labels o Added 'label.cex' argument to scale the node labels o Added 'border.width' argument to control the border width. o 'color' can now be assigned 'background', which will transfer the background color to the node. This is now the default o added XKCD easter egg, use XKCD = TRUE! o CHANGES: o Many internal changes to facilitate the new 'semPlot' package. o The functions qgraph.loadings, qgraph.sem and qgraph.lavaan will soon become deprecated. o Several changes to how curves work in qgraph: o the midpoint of curved edges is now set using the inner function PerpMid. This function now consistently finds a midpoint perpendicular to the edge regardless of shape of the plot in inches. o Added 'curveDefault' argument and some changes to 'curve' argument. 'curveDefault' now controls the default curvature (1 by default). If 'curve' is NA the default curvature is used (curved edges if there are multiple edges between two nodes). If 'curve' is of length 1 it overwrites 'curveDefault' for backward compatibility. o 'curveAll' argument added. This argument controls if all edges should be curved or only edges of nodes with multiple edges between them. o Multiple edges between nodes are now curved uniquely for each edge by default. o The 'curve' argument now scales the distance between the midpoint of an edge and the midpoint of the curved edge proportional to 2.5% of the diagonal size of the plot in inches. o For more consistency in argument names 'border.colors' has been renamed to 'border.color' and 'lcolor' has been renamed to 'label.color' o non-finite weights are now set to zero and omitted o Improved the scaling algorithm of the labels. Labels now scale properly to 75% of the width of a node. o larger nodes are now plotted on top of smaller nodes. 
o Borders are now plotted at the same level as node colors. This means borders will no longer overlap overlapping nodes. Labels will still overlap overlapping nodes to increase readability o Edge ends are now replaced if the node is partly transparent, which is no longer based on vTrans o 'label.color' and 'border.color' now default to 'white' if the mean color (rgb) is higher than 0.5 o 'color' now defaults to the background color. o trans now once again only defaults to TRUE if bg=TRUE. o Argument lists can now be nested. Lower level arguments take precedence over higher nested arguments. o Midpoint of straight edges for placing edge labels is now based on the center of the edge, not on the center between the two connected nodes. o Edge labels are now plotted only for visible edges o 'transparency' now defaults to FALSE if 'bg' is a color other than 'white' o 'transparancy' is now matched by trans argument as well o The background of edge labels is now the same as the background color by default o Edge labels are now colored the same as the edge by default o BUG FIXES: o Fixed a bug where bidirectional edges were plotted twice o Fixed a bug where edge labels caused an error if all edge weights are zero o Fixed a bug where bidirectional edges were not stored properly o Fixed a bug where edge colors incorrectly returned an error stating the length was wrong o Fixed a bug where incorrect edge labels were placed on edges. o Fixed a bug where specifying the wrong number of edge labels resulted in an error o Fixed a bug where border colors weren't repeated for all nodes. o Fixed a bug in Cent2EdgeNode not restoring mar, it can now be called outside of qgraph o Fixed a bug where the midpoint of curved lines were not perpendicular to the midpoint between two nodes in non-square plotting areas Changes in Version 1.1.0 o MAJOR CHANGES: o FIRST VERSION OF GUI INCLUDED, call qgraph with gui=TRUE or use qgraph.gui(). 
o Selfloops have been reworked through the internal function SelfLoop() and should now look much better. Can now be rotated using the loopRotation argument. The loop argument for controlling the size is currently defunct. o Method for moving beginning and end of edge to the edge of nodes has been reworked through the internal function Cent2Edge(). This should now be much more stable, and no longer be bugged if a graph is plotted with margins. o MINOR CHANGES o Some internal changes to facilitate the upcoming semPlot package (https://github.com/SachaEpskamp/semPlot) o Added 'residScale' and 'residEdge' arguments to qgraph() o Added 'residuals' argument to qgraph(). If TRUE, selfloops are plotted as residuals with no origin. o Fixed issues with margins when plot=FALSE o Fixed dimensionality issues of arrowheads. o Improved default arguments for 'esize'. Now sets to 2 if the graph is unweighted and halfs the default value if graph is directed. o Fixed outdated information in the qgraph help page on specification of layout argument. o Several small bugfixes Changes in Version 1.0.5-2 o Removed tikzdevice option due to removal from CRAN. Please see www.sachaepskamp.com/qgraph for details. Changes in Version 1.0.5 o Fixed a compatibility issue with the most recent version of Lavaan (0.4-14) Changes in Version 1.0.4 o Added reference to JSS paper. o The name of the author in several function Rd pages has been changed to reflect my new e-mail address. Changes in Version 1.0.3 o Added the 'aspect' argument to qgraph. Set this to TRUE to keep the aspect ratio of the original layout (e.g. result from layout="spring"). o Fixed a bug where an unweighted graph object would be plotted as a weighted graph in consecutive calls. o Fixed issues with the "centrality" function in combination with bidirectional edges. 
Changes in Version 1.0.2 o Fixed a bug with using a tuning parameter of 0 in centrality() o Fixed a bug with the arrows not being plotted properly when using a layout and non-square plotting area o Fixed a bug where qgraph() failed on graphs with 10 or more nodes when using a "pcalg" object as input Changes in Version 1.0.1 o Fixed a bug with using the 'directed' argument as a matrix. Changes in Version 1.0.0 o Because this was already the case for imported packages, qgraph is now Byte-Compiled and depends on R version 2.14. o Added the centrality() function to compute node centrality statistics. o It is now possible to manually set the edge width and color. See the added section on this in the qgraph help page. o Added the arguments 'layoutScale' and 'layoutOffset' to qgraph(). These can be used to define the coordinates range of the graph. o Changed the way qgraph.panel() works. Instead of partitioning the plot in 4 sections with layout() the function now makes only one plot containing 4 graphs. This should fix all issues with the partition persisting after using this function. o If qgraph.efa() or qgraph.pca() is used on a dataset with 20 or fewer variables the variable names (abbreviated to 3 characters) are used as labels for the nodes. Changes in Version 0.5.3 o The labels argument now attempts to extract label names from the adjacency matrix for small graphs (<= 20 nodes) o With legend=TRUE a check is now made to see if the groups list is not NULL. Else legend=FALSE is forced o Legends are now plotted in the same window of the graph instead of a separate plot window. This should fix many issues with plots remaining partitioned after running qgraph as well as allow users to incorporate legends in partitioned plots. o Fixed issues with the legend of the scores argument. This should now work as expected. o Fixed issues with the qgraph.svg() function. This should now work again. o Significantly changed the qgraph() help page. It should now be much clearer. 
o Fixed an issue where lavaan objects with a mean structure would result in an error when using qgraph.lavaan(). Mean structure is now ignored in qgraph.lavaan(). o Changed the first argument of qgraph from 'adj' to 'input'. The help files now properly mention Weights matrices instead of Adjacency matrices. Changes in Version 0.5.2 o This version was aimed at fixing compatibility issues with sem 2.0.0 Changes in Version 0.5.1 o Fixed some minor bugs o Fixed issues with the qgraph.lavaan() function o Added the qgraph.animate() function for animating network growth and change! Check out its help page for more details. o Fixed bugs with arbitrarily small networks. Such as single node networks, networks with no edges, and layouts with no variance in x or y coordinates. Changes in Version 0.5.0 o Graphs containing only one node can now be plotted o Fixed a bug that caused weird arrows and wrong graph shapes if one of the opened windows contained a graph with a legend o Added "x11" and "X11" as options for the filetype argument, useful for use with Rstudio o qgraph() now exits with previous par() settings restored. o Removed 'pcalg' from the suggests list because it is not actually used in qgraph and gave errors in R CMD CHECK Changes in Version 0.4.9 o Fixed a critical bug in making svg images o The legend now takes less space by default. o Added the argument 'GLratio' to qgraph() which specifies the relative size of the graph compared to the layout. o 'legend.cex' now defaults to 0.6 o qgraph.panel() now omits the legend by default as it could not handle legends anyway. o qgraph.panel() now uses better options for the layout of the factorial graph o Fixed a bug in qgraph.sem() causing the spring layout to malfunction. o Added proper references for the big 5 data Changes in Version 0.4.8 o The 'groups' argument can now be assigned a factor as well as a list. o qgraph.cfa should now correctly call only the sem package version of sem(). 
o In qgraph.sem(), connections between variables at the same level (e.g. regression among factors) can now be curved in the circular layout. o Similar to 'directed', the qgraph() arguments 'curve', 'lty' and 'bidirectional' can now also be supplied in matrix form. o qgraph.cfa() can now also use the lavaan package o qgraph.cfa() now has a 'pkg' argument indicating which package should be used for estimation. Defaults to "sem" and can also be "lavaan" o Added the qgraph.lavaan() function which creates an output document to the results of a lavaan fit similar to what qgraph.sem() does for the sem package o Arguments in the qgraph() help file are now divided into multiple sections o Added the argument 'overlay' to qgraph(). Set this to true to plot a venn-diagram like overlay over the graph. o Several packages are now imported rather than suggested/depended o New feature: Significance graphs: o qgraph() can now be used to plot significance levels (p-values). To do this set mode="sig". This will result in a graph in which two nodes are connected if p<0.05. Different shades of blue indicate different levels of significance. A Bonferroni correction can also be applied. Also works with gray=TRUE. o Added the argument 'mode' to qgraph() which specifies what the edge weights mean. Currently this can be "strength" (default) or "sig" (see below) o The 'graph' argument in qgraph() can now be set to "sig" or "significance". If so, the p-value for each correlation is calculated (using the fdrtool package), and a significance mode graph is made. o The `bonf' argument in qgraph() can be set to TRUE to apply a Bonferroni correction by multiplying all p-values by the number of p-values o The `alpha' argument in qgraph() can be used to set significance levels of interest o The `OmitInsig' argument can be used to omit all correlations/p-values with a p-value above the lowest value in `alpha'. 
o Using `OmitInsig = TRUE' and `gray = TRUE' with a correlation matrix will create an unweighted graph in which only significant correlations are connected. o Using edgelists as input for qgraph() can now also be done with a dataframe and contain characters. These characters will be used as edge labels o qgraph() now also moderately supports "graphNEL" objects (Rgraphviz) and "pcAlgo" objects (pcalg). See examples. Changes in Version 0.4.7 o Added the qgraph.cfa() function, which performs a confirmatory factor analysis using the sem package o Fixed a bug in qgraph.sem() causing faulty path diagrams if nodes were labeled numerically o Fixed a bug in qgraph() causing misplaced edge labels if some edges are bidirectional o The 'rotation' argument is removed from qgraph.sem() because it should no longer be necessary o The "circle" layout is now default in qgraph.sem() o Added the 'gray' argument to qgraph(). If this is TRUE the graph will be plotted in gray-scale colors. Changes in Version 0.4.6 o New features: o Added more options for the 'directed' argument. When using an edgelist this can be a vector indicating per edge if it is directed or not. If an adjacency matrix is used then this can be a matrix indicating if the element indicates a directed edge. o the output of qgraph() now also contains the edgelist. Running qgraph() on the output of another qgraph() run should now give exactly the same graph. o Added three functions for S3 methods: print.qgraph(), summary.qgraph() and plot.qgraph(). o qgraph() now calls qgraph.loadings(), qgraph.efa(), qgraph.pca(), qgraph.sem() and qgraph.semModel() if the first argument is respectively of class "loadings", "factanal", "principal", "sem" and "mod". o Added the 'DoNotPlot' argument to qgraph(). If this is TRUE then nothing will be plotted, but the graph will still be returned as usual. o qgraph.layout() has been removed since the 'DoNotPlot' argument makes it redundant. o Updated examples with big 5 data. 
Examples are now also executable using example(qgraph). o qgraph.loadings() and its wrappers qgraph.efa() and qgraph.pca() have been revamped: o Residuals can now be plotted in qgraph.loadings() using the 'resid' argument. qgraph.efa() includes residuals by default. o Inter-factor correlations can now be plotted using the 'factorCors' argument. o qgraph.loadings() now runs faster and results in smaller PDF file sizes. o qgraph.loadings() now uses "circle" layout by default. o A "loadings" object can now be passed to qgraph.loadings(), a "factanal" object to qgraph.efa() and a "principal" object to qgraph.pca(). o Minor changes to qgraph.sem(): o New function added: 'qgraph.semModel()'. This function makes a path diagram based on a sem model. o New argument added to qgraph.sem(), 'panels', which can be assigned a vector indicating which panels should be plotted. o 'filetype' can now be anything but "pdf" to use qgraph.sem() to plot in R. o Bug fixes: o Fixed a bug that caused edges to be plotted several times in directed graphs. o The qgraph() function should now be significantly quicker. o Fixed an issue causing placement of edges to assume a square node as origin. Changes in Version 0.4.5 o Added the Big 5 dataset. o layout.orig is now returned as well as layout. This is the original layout (not rescaled) which can be used for constraints o in 'qgraph.loadings' nodes are now reordered if the factors can be identified o Added the 'vTrans' argument, which can be used to make nodes transparent. o Fixed a critical bug resulting in oversized edges if the largest absolute edge weight is negative Changes in Version 0.4.4 o Weights are now divided by the 'maximum' when computing the fruchterman-reingold layout. 
o Fixed an issue causing loops to become too large o Fixed an issue causing diag="col" assignment to also create loops o 'directed' now defaults to TRUE if an edgelist is used as input in 'qgraph' o Fixed a bug with the spring layout and groups list o The nfact argument now defaults to the amount of eigenvalues greater than 1 or the length of the groups list. o Using layout="circle" in qgraph.loadings should now generate a better looking layout. o 'width' and 'height' are now automatically adjusted to the size of the opened device. Changes in Version 0.4.3 o Fixed a bug in the 'tooltips' argument in qgraph. o Fixed a bug with numeric assignment for the 'arrows' argument on curved lines o By default lines are now only curved if there is no other edge with a weight over the minimum between the two nodes instead of if there is any other edge between the two nodes. o Greek labels are now identified by using sapply rather than a for-loop. o Added 'tex' as filetype. Graphs can now be output in a .tex format using the 'tikzDevice'. The .tex file can then be build in LaTeX compilers. o The 'tooltips' argument can now also be used for filetype='tex' o Replaced the use of 'windows' function with the 'dev.new' function. This should fix problems with the default output on platforms other than Windows. o By default, a new R window is now only opened if the active device is the NULL-device. o Added the 'qgraph.panel' function. Which creates a 4-panel plot with useful default graphs for correlation matrices. Changes in Version 0.4.2 o Changed the standard output in R to fixed width windows (windows(rescale="fixed"...). o Fixed a spelling error in the authors. o Minimum and maximum details moved slightly to the center. o Fixed a bug in identifying the factors in 'qgraph.loadings' o Extended and improved the examples of the qgraph.efa, qgraph.pca and qgraph.loadings functions. o Changed qgraph from depending to suggesting the psych, sem and RSVGTipsDevice packages. 
This enables 64bit users to install the complete package o Similar to 'qgraph', 'qgraph.loadings' 'qgraph.efa' and 'qgraph.pca' now also return (invisible) a list of class "qgraph" containing the arguments. o Removed scaling of the sizes of arrow heads, and added the argument 'asize' to control arrow head size. o Added a new function, 'qgraph.arrow' which will be used to draw the arrowheads instead of 'arrows'. This enables closed arrowheads (by default) as well as better optimization for pdf output. qgraph/COPYING0000644000176200001440000004365514430573263012613 0ustar liggesusers GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. 
These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. 
(Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. 
c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. 
If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. 
The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. 
If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License.qgraph/R/0000755000176200001440000000000014521123775011744 5ustar liggesusersqgraph/R/qgraph_arrow.R0000644000176200001440000000127214430573263014565 0ustar liggesusersqgraph.arrow=function(x,y,x.orig,y.orig,length,angle=30*pi/180,lwd,col="black", open=TRUE,Xasp=1,lty=1) { warning("This function is no longer supported. 
Use qgraph:::DrawArrow") x1=x+(x.orig-x)*Xasp x2=x y1=y.orig y2=y r=angle l=length dx=x2-x1 dy=y2-y1 d=sqrt(dx^2/Xasp+dy^2) px=x2-dx*l/d py=y2-dy*l/d Rx = cos(r) * (px-x2) - sin(r) * (py-y2) + x2 Ry = sin(r) * (px-x2) + cos(r) * (py-y2) + y2 Lx = cos(-r) * (px-x2) - sin(-r) * (py-y2) + x2 Ly = sin(-r) * (px-x2) + cos(-r) * (py-y2) + y2 if (open) { lines((c(Rx,x,Lx)-x)/Xasp+x,c(Ry,y,Ly),lwd=lwd,col=col,lty=lty) } else { polygon((c(Lx,x2,Rx)-x)/Xasp+x,c(Ly,y2,Ry),lwd=lwd,col=col,border=NA) } } qgraph/R/qgraph_mixed.R0000644000176200001440000000457514430573263014552 0ustar liggesusersqgraphMixed <- function( undirected, # Adjacency matrix or edgelist of undirected network directed, # Adjacency matrix or edgelist of directed network parallel = TRUE, # Sent to parallelEdge, to indicate this function uses different default parallelAngle = pi/6, # Used similarly as parallelAngle in qgraph diagUndirected = FALSE, # Include diagonal of undirected graph diagDirected = TRUE, # include diagonal of directed graph ltyUndirected = 1, ltyDirected = 1, curve = 1, ... # qgraph arguments ){ # Test if undirected is adjacency matrix and turn it into edgelist: if (nrow(undirected) == ncol(undirected)){ if ((ncol(undirected) == 3 & nrow(undirected) == 3) | (ncol(undirected) == 2 & nrow(undirected) == 2)){ message("Treating 2x2 and 3x3 matrices as adjacency matrix") } incl <- upper.tri(undirected, diag=diagUndirected) undirected <- cbind( row(undirected)[incl], col(undirected)[incl], undirected[incl]) } # Check if three columns. 
If not, append third: if (ncol(undirected) == 2){ undirected <- cbind(undirected,1) } # Test if undirected is adjacency matrix and turn it into edgelist: if (nrow(directed) == ncol(directed)){ if ((ncol(directed) == 3 & nrow(directed) == 3) | (ncol(directed) == 2 & nrow(directed) == 2)){ message("Treating 2x2 and 3x3 matrices as adjacency matrix") } incl <- matrix(TRUE,nrow(directed), ncol(directed)) if (!diagDirected){ diag(incl) <- FALSE } directed <- cbind( row(directed)[incl], col(directed)[incl], directed[incl]) } # Check if three columns. If not, append third: if (ncol(directed) == 2){ directed <- cbind(directed,1) } # append: Edgelist <- rbind(undirected, directed) if (all(directed[,3] == 1)){ Edgelist <- Edgelist[,1:2] } # Create directed vector: Directed <- c(rep(FALSE,nrow(undirected)), rep(TRUE, nrow(directed))) lty <- c(rep(ltyUndirected,nrow(undirected)), rep(ltyDirected, nrow(directed))) # Create parallelAngle vector: parallelAngle <- ifelse(Directed, -parallelAngle, 0) # Curve (if not parallel): if (parallel){ Curve <- NULL } else { Curve <- ifelse(Directed,-curve,0) } # Run qgraph: invisible(qgraph(Edgelist, parallelEdge = parallel, parallelAngle = parallelAngle, directed = Directed, curve = Curve, lty = lty, ...)) } qgraph/R/VARglm.R0000644000176200001440000000446214430573263013225 0ustar liggesusersVARglm <- function(x,family,vars,adjacency,icfun = BIC,...) { # Returns estimated weights matrix of repeated measures data x ## x must be matrix, rows indicate measures and columns indicate variables # If adjacency is missing, full adjacency is tested # 'family' can be assigned family function (see ?family), list of such ## functions for each variable in x or character vector with names of the ## family functions. # 'vars' must be a vector indicating which variables are predicted, can be useful for parallel implementation. 
if (missing(x)) stop("'x' must be assigned") x <- as.matrix(x) Ni <- ncol(x) Nt <- nrow(x) # Check input: if (missing(vars)) vars <- 1:Ni No <- length(vars) if (missing(adjacency)) adjacency <- matrix(1,Ni,No) if (is.vector(adjacency)) adjacency <- as.matrix(adjacency) if (!is.matrix(adjacency) && ncol(adjacency)!=No && nrow(adjacency)!=Ni) stop("'adjacency' must be square matrix with a row for each predictor and column for each outcome variable.") if (any(apply(x,2,sd)==0)) { adjacency[apply(x,2,sd)==0,] <- 0 adjacency[,apply(x,2,sd)==0] <- 0 warning("Adjacency matrix adjusted to not include nodes with 0 variance.") } if (missing(family)) { if (identical(c(0,1),sort(unique(c(x))))) family <- rep("binomial",No) else family <- rep("gaussian",No) } if (length(family)==1) { family <- list(family) if (No > 1) for (i in 2:No) family[[i]] <- family[[1]] } if (length(family)!=No) stop("Length of family is not equal to number of outcome variables.") ## Output: Out <- list() Out$graph <- matrix(0,Ni,No) Out$IC <- 0 # Run glms: for (i in 1:No) { if (is.function(family[[i]])) fam <- family[[i]] else fam <- get(family[[i]]) if (any(as.logical(adjacency[,i]))) { tryres <- try(Res <- glm(x[-1,vars[i]] ~ x[-nrow(x),as.logical(adjacency[,i])],family=fam)) if (is(tryres, 'try-error')) Res <- glm(x[-1,vars[i]] ~ NULL,family=fam) } else { Res <- glm(x[-1,vars[i]] ~ NULL,family=fam) } Out$graph[as.logical(adjacency[,i]),i] <- coef(Res)[-1] Out$IC <- Out$IC + icfun(Res,...) 
} Out$graph[is.na(Out$graph)] <- 0 return(Out) }qgraph/R/DrawArrow.R0000644000176200001440000000510414430573263013777 0ustar liggesusers ### CONVERTS CENTER COORDINATES TO EDGE OF NODE ###: DrawArrow <- function(x,y,r,angle=pi/4,cex,open=FALSE,lwd=1,lty=1,col="black") { r <- r%%(2*pi) xrange <- abs(diff(par("usr")[1:2])) yrange <- abs(diff(par("usr")[3:4])) xmarrange <- sum(par("mai")[c(2,4)]) ymarrange <- sum(par("mai")[c(1,3)]) xin <- par("pin")[1] yin <- par("pin")[2] xLeft <- x + ((xin+xmarrange)/xin)*(7/(xin+xmarrange))*(xrange/2.16)*cex*par("csi")*sin(r-angle + pi)/17.5 yLeft <- y + ((yin+ymarrange)/yin)*(7/(yin+ymarrange))*(yrange/2.16)*cex*par("csi")*cos(r-angle + pi)/17.5 xRight <- x + ((xin+xmarrange)/xin)*(7/(xin+xmarrange))*(xrange/2.16)*cex*par("csi")*sin(r+angle + pi)/17.5 yRight <- y + ((yin+ymarrange)/yin)*(7/(yin+ymarrange))*(yrange/2.16)*cex*par("csi")*cos(r+angle + pi)/17.5 if (open) { lines(c(xLeft,x,xRight),c(yLeft,y,yRight),lwd=lwd,col=col,lty=lty) } else { polygon(c(xLeft,x,xRight),c(yLeft,y,yRight),lwd=lwd,col=col,border=NA) } } ## Midpoint of arrow: ArrowMidPoint <- function(x,y,r,angle=pi/4,cex) { r <- r%%(2*pi) xrange <- abs(diff(par("usr")[1:2])) yrange <- abs(diff(par("usr")[3:4])) xmarrange <- sum(par("mai")[c(2,4)]) ymarrange <- sum(par("mai")[c(1,3)]) xin <- par("pin")[1] yin <- par("pin")[2] xLeft <- x + ((xin+xmarrange)/xin)*(7/(xin+xmarrange))*(xrange/2.16)*cex*par("csi")*sin(r-angle + pi)/17.5 yLeft <- y + ((yin+ymarrange)/yin)*(7/(yin+ymarrange))*(yrange/2.16)*cex*par("csi")*cos(r-angle + pi)/17.5 xRight <- x + ((xin+xmarrange)/xin)*(7/(xin+xmarrange))*(xrange/2.16)*cex*par("csi")*sin(r+angle + pi)/17.5 yRight <- y + ((yin+ymarrange)/yin)*(7/(yin+ymarrange))*(yrange/2.16)*cex*par("csi")*cos(r+angle + pi)/17.5 mids <- c((xRight+xLeft)/2,(yRight+yLeft)/2) return(mids) } # Radius of arrow in inches: ArrowRadIn <- function(angle=pi/4,cex) { r <- 0 xrange <- abs(diff(par("usr")[1:2])) yrange <- abs(diff(par("usr")[3:4])) xmarrange <- 
sum(par("mai")[c(2,4)]) ymarrange <- sum(par("mai")[c(1,3)]) xin <- par("pin")[1] yin <- par("pin")[2] xLeft <- ((xin+xmarrange)/xin)*(7/(xin+xmarrange))*(xrange/2.16)*cex*par("csi")*sin(r-angle + pi)/17.5 yLeft <- ((yin+ymarrange)/yin)*(7/(yin+ymarrange))*(yrange/2.16)*cex*par("csi")*cos(r-angle + pi)/17.5 xRight <- ((xin+xmarrange)/xin)*(7/(xin+xmarrange))*(xrange/2.16)*cex*par("csi")*sin(r+angle + pi)/17.5 yRight <- ((yin+ymarrange)/yin)*(7/(yin+ymarrange))*(yrange/2.16)*cex*par("csi")*cos(r+angle + pi)/17.5 mids <- c(usr2inX2((xRight+xLeft)/2),usr2inY2((yRight+yLeft)/2)) return(sqrt(sum(mids^2))) } qgraph/R/pathways.R0000644000176200001440000000337714430573263013741 0ustar liggesusers# Function that highlights shortest paths in a network: pathways <- function( graph,# Qgraph object from, # Vector of from indices to, # vector of to indices, if missing to all nodes. fading = 0.25, lty = 3 # layout = c("old","center") ){ stopifnot(is(graph,"qgraph")) # Character: if (is.character(from)){ if (!all(from %in% graph$graphAttributes$Nodes$labels)){ stop("Node label in 'from' argument does not exist") } from <- match(from, graph$graphAttributes$Nodes$labels) } if (missing(to)){ browser() } if (is.character(to)){ if (!all(to %in% graph$graphAttributes$Nodes$labels)){ stop("Node label in 'to' argument does not exist") } to <- match(to, graph$graphAttributes$Nodes$labels) } Cent <- centrality(graph,pkg = "igraph",all.shortest.paths = TRUE) SP <- Cent$ShortestPaths pathList <- matrix(NA,0,2) for (i in from){ for (j in to){ pathList <- rbind(pathList,do.call(rbind,lapply(SP[[i,j]],function(x)cbind(x[-length(x)],x[-1])))) } } highlight <- rep(FALSE,nrow(pathList)) for (i in seq_len(nrow(pathList))){ highlight[i] <- which(graph$Edgelist$from %in% pathList[i,] & graph$Edgelist$to %in% pathList[i,]) } graph$graphAttributes$Edges$color <- Fade(graph$graphAttributes$Edges$color, ifelse(seq_along(graph$Edgelist$from) %in% highlight, 1, fading)) graph$graphAttributes$Edges$lty <- 
ifelse(seq_along(graph$Edgelist$from) %in% highlight, 1, lty) # Change edgesort to plot changed edges first: graph$graphAttributes$Graph$edgesort <- c( graph$graphAttributes$Graph$edgesort[!graph$graphAttributes$Graph$edgesort %in% highlight], graph$graphAttributes$Graph$edgesort[graph$graphAttributes$Graph$edgesort %in% highlight] ) plot(graph) }qgraph/R/clusteringPlot.R0000644000176200001440000000641514430573263015113 0ustar liggesusersclusteringPlot <- function(..., scale = c("raw0","raw","z-scores", "relative"), labels, include , signed = FALSE, theme_bw = TRUE, print = TRUE, verbose = TRUE, standardized, relative, orderBy = "default", # Can also be one of the measures decreasing = FALSE) { scale <- match.arg(scale) if (!missing(standardized)){ warning("'standardized' argument is deprecated and will be removed.") } else { standardized <- scale == "z-scores" } if (!missing(relative)){ warning("'relative' argument is deprecated and will be removed.") } else { relative <- scale == "relative" } if (scale == "z-scores"){ message("Note: z-scores are shown on x-axis rather than raw centrality indices.") } if (scale == "relative"){ message("Note: relative centrality indices are shown on x-axis rather than raw centrality indices.") } # Some dummies to get rid of NOTES: measure <- NULL value <- NULL node <- NULL type <- NULL Long <- clusteringTable(..., labels=labels, standardized=standardized, relative=relative, signed=signed) Long$value[!is.finite(Long$value)] <- 0 # If not missing, include only include vars: if (!missing(include)) { Long <- subset(Long, measure %in% include) Long$measure <- factor(Long$measure,levels = include) } # Ordereing by node name to make nice paths: # Long <- Long[gtools::mixedorder(Long$node),] # Long$node <- factor(as.character(Long$node), levels = unique(gtools::mixedsort(as.character(Long$node)))) if (orderBy == "default"){ nodeLevels <- unique(gtools::mixedsort(as.character(Long$node), decreasing = decreasing)) } else { nodeLevels <- 
names(sort(tapply(Long$value[Long$measure == orderBy],Long$node[Long$measure == orderBy],mean), decreasing=decreasing)) } Long$node <- factor(as.character(Long$node), levels = nodeLevels) Long <- Long[gtools::mixedorder(Long$node),] # PLOT: if (length(unique(Long$type)) > 1) { g <- ggplot(Long, aes(x = value, y = node, group = type, colour = type)) } else { g <- ggplot(Long, aes(x = value, y = node, group = type)) } g <- g + geom_path() + xlab("") + ylab("") + geom_point() if (length(unique(Long$graph)) > 1) { g <- g + facet_grid(graph ~ measure, scales = "free") } else { g <- g + facet_grid( ~ measure, scales = "free") } if (theme_bw){ g <- g + theme_bw() } if (scale == "raw0"){ g <-g + xlim(0,NA) } if (print){ print(g) invisible(g) } else { return(g) } } # clusteringPlot <- function(..., labels, signed = FALSE, relative = TRUE) # { # Long <- clusteringTable(..., labels=labels, signed=signed, relative=relative) # # # Ordereing by node name to make nice paths: # Long <- Long[order(Long$node),] # # PLOT: # if (length(unique(Long$graph)) > 1) # { # g <- ggplot(Long, aes(x = value, y = node, group = graph, colour = graph)) + geom_path() + # facet_grid(~ measure, scales = "free") + xlab("") + ylab("") + geom_point() # } else { # g <- ggplot(Long, aes(x = value, y = node, group = graph)) + geom_path() + # facet_grid(~ measure, scales = "free") + xlab("") + ylab("") + geom_point() # } # # return(g) # }qgraph/R/PLOT.R0000644000176200001440000014706714430573263012664 0ustar liggesusersplot.qgraph <- function(x, ...) { ### Extract arguments: # My apologies, dear people that actually read my code, for the right # assignment operator---I was lazy. 
## Edgelist: E <- list() E$from <- x$Edgelist$from E$to <- x$Edgelist$to E$weight <- x$Edgelist$weight directed <- x$Edgelist$directed bidirectional <- x$Edgelist$bidirectional # Nodes: bcolor <- x$graphAttributes$Nodes$border.color borders <- x$graphAttributes$Nodes$borders border.width <- x$graphAttributes$Nodes$border.width label.cex <- x$graphAttributes$Nodes$label.cex label.font <- x$graphAttributes$Nodes$label.font lcolor <- x$graphAttributes$Nodes$label.color labels <- x$graphAttributes$Nodes$labels nodeNames <- x$graphAttributes$Nodes$names loopRotation <- x$graphAttributes$Nodes$loopRotation shape <- x$graphAttributes$Nodes$shape vertex.colors <- x$graphAttributes$Nodes$color vsize <- x$graphAttributes$Nodes$width vsize2 <- x$graphAttributes$Nodes$height subplots <- x$graphAttributes$Nodes$subplots images <- x$graphAttributes$Nodes$images # tooltips <- x$graphAttributes$Nodes$tooltips # SVGtooltips <- x$graphAttributes$Nodes$SVGtooltips bars <- x$graphAttributes$Nodes$bars barSide <- x$graphAttributes$Nodes$barSide barColor <- x$graphAttributes$Nodes$barColor barLength <- x$graphAttributes$Nodes$barLength means <- x$graphAttributes$Nodes$means SDs <- x$graphAttributes$Nodes$SDs node.label.offset <- x$graphAttributes$Nodes$node.label.offset node.label.position <- x$graphAttributes$Nodes$node.label.position # Pies: pieColor <- x$graphAttributes$Nodes$pieColor pieColor2 <- x$graphAttributes$Nodes$pieColor2 pieBorder <- x$graphAttributes$Nodes$pieBorder pieStart <- x$graphAttributes$Nodes$pieStart pie <- x$graphAttributes$Nodes$pie pieDarken <- x$graphAttributes$Nodes$pieDarken pieCIs <- x$plotOptions$pieCIs # for BW only bw <- FALSE if(!is.null(x$graphAttributes$Nodes$density)) { density <- x$graphAttributes$Nodes$density bw <- TRUE } else density <- rep(NA, length(shape)) if(!is.null(x$graphAttributes$Nodes$angle)) { angle <- x$graphAttributes$Nodes$angle } else angle <- rep(0, length(shape)) # Edges: curve <- x$graphAttributes$Edges$curve edge.color <- 
x$graphAttributes$Edges$color edge.labels <- x$graphAttributes$Edges$labels edge.label.cex <- x$graphAttributes$Edges$label.cex edge.label.bg <- x$graphAttributes$Edges$label.bg edge.label.margin <- x$graphAttributes$Edges$label.margin edge.label.font <- x$graphAttributes$Edges$label.font ELcolor <- x$graphAttributes$Edges$label.color edge.width <- x$graphAttributes$Edges$width lty <- x$graphAttributes$Edges$lty edge.label.position <- x$graphAttributes$Edges$edge.label.position asize <- x$graphAttributes$Edges$asize residEdge <- x$graphAttributes$Edges$residEdge CircleEdgeEnd <- x$graphAttributes$Edges$CircleEdgeEnd Pvals <- x$graphAttributes$Edges$Pvals parallelEdge <- x$graphAttributes$Edges$parallelEdge parallelAngle <- x$graphAttributes$Edges$parallelAngle edgeConnectPoints <- x$graphAttributes$Edges$edgeConnectPoints # Knots: knots <- x$graphAttributes$Knots$knots knot.size <- x$graphAttributes$Knots$knot.size knot.color <- x$graphAttributes$Knots$knot.color knot.borders <- x$graphAttributes$Knots$knot.borders knot.border.color <- x$graphAttributes$Knots$knot.border.color knot.border.width <- x$graphAttributes$Knots$knot.border.width # Graph: nNodes <- x$graphAttributes$Graph$nNodes weighted <- x$graphAttributes$Graph$weighted edgesort <- x$graphAttributes$Graph$edgesort scores <- x$graphAttributes$Graph$scores scores.range <- x$graphAttributes$Graph$scores.range groups <- x$graphAttributes$Graph$groups minimum <- x$graphAttributes$Graph$minimum maximum <- x$graphAttributes$Graph$maximum cut <- x$graphAttributes$Graph$cut polygonList <- x$graphAttributes$Graph$polygonList mode <- x$graphAttributes$Graph$mode color <- x$graphAttributes$Graph$color # Layout: layout <- x$layout original.layout <- x$layout.orig # Plot options: # filetype <- x$plotOptions$filetype # filetype <- if (missing(filetype)) x$plotOptions$filetype # filename <- if (missing(filename)) x$plotOptions$filename # normalize <- if (missing(normalize)) x$plotOptions$normalize # plot <- if 
(missing(plot)) x$plotOptions$plot # mar <- if (missing(mar)) x$plotOptions$mar # GLratio <- if (missing(GLratio)) x$plotOptions$GLratio # legend <- if (missing(legend)) x$plotOptions$legend # legend.cex <- if (missing(legend.cex)) x$plotOptions$legend.cex # pty <- if (missing(pty)) x$plotOptions$pty # XKCD <- if (missing(XKCD)) x$plotOptions$XKCD # arrows <- if (missing(arrows)) x$plotOptions$arrows # arrowAngle <- if (missing(arrowAngle)) x$plotOptions$arrowAngle # open <- if (missing(open)) x$plotOptions$open # curvePivot <- if (missing(curvePivot)) x$plotOptions$curvePivot # curveShape <- if (missing(curveShape)) x$plotOptions$curveShape # curveScale <- if (missing(curveScale)) x$plotOptions$curveScale # curvePivotShape <- if (missing(curvePivotShape)) x$plotOptions$curvePivotShape # label.scale <- if (missing(label.scale)) x$plotOptions$label.scale # label.norm <- if (missing(label.norm)) x$plotOptions$label.norm # label.prop <- if (missing(label.prop)) x$plotOptions$label.prop # overlay <- if (missing(overlay)) x$plotOptions$overlay # details <- if (missing(details)) x$plotOptions$details # legend.mode <- if (missing(legend.mode)) x$plotOptions$legend.mode filetype <- x$plotOptions$filetype filename <- x$plotOptions$filename normalize <- x$plotOptions$normalize plot <- x$plotOptions$plot mar <- x$plotOptions$mar GLratio <- x$plotOptions$GLratio legend <- x$plotOptions$legend legend.cex <- x$plotOptions$legend.cex pty <- x$plotOptions$pty XKCD <- x$plotOptions$XKCD arrows <- x$plotOptions$arrows arrowAngle <- x$plotOptions$arrowAngle open <- x$plotOptions$open curvePivot <- x$plotOptions$curvePivot curveShape <- x$plotOptions$curveShape curveScale <- x$plotOptions$curveScale curveScaleNodeCorrection <- x$plotOptions$curveScaleNodeCorrection curvePivotShape <- x$plotOptions$curvePivotShape label.scale <- x$plotOptions$label.scale label.scale.equal <- x$plotOptions$label.scale.equal label.fill.vertical <- x$plotOptions$label.fill.vertical label.fill.horizontal 
<- x$plotOptions$label.fill.horizontal label.norm <- x$plotOptions$label.norm label.prop <- x$plotOptions$label.prop # overlay <- x$plotOptions$overlay details <- x$plotOptions$details legend.mode <- x$plotOptions$legend.mode background <- x$plotOptions$background bg <- x$plotOptions$bg residuals <- x$plotOptions$residuals residScale <- x$plotOptions$residScale srt <- x$plotOptions$srt gray <- x$plotOptions$gray # overlaySize <- x$plotOptions$overlaySize plotELBG <- x$plotOptions$plotELBG alpha <- x$plotOptions$alpha width <- x$plotOptions$width height <- x$plotOptions$height aspect <- x$plotOptions$aspect rescale <- x$plotOptions$rescale barsAtSide <- x$plotOptions$barsAtSide bgres <- x$plotOptions$bgres bgcontrol <- x$plotOptions$bgcontrol res <- x$plotOptions$resolution subpars <- x$plotOptions$subpars subplotbg <- x$plotOptions$subplotbg title <- x$plotOptions$title title.cex <- x$plotOptions$title.cex preExpression <- x$plotOptions$preExpression postExpression <- x$plotOptions$postExpression usePCH <- x$plotOptions$usePCH node.resolution <- x$plotOptions$node.resolution noPar <- x$plotOptions$noPar meanRange <- x$plotOptions$meanRange drawPies <- x$plotOptions$drawPies pieRadius <- x$plotOptions$pieRadius pastel <- x$plotOptions$pastel rainbowStart <- x$plotOptions$rainbowStart piePastel <- x$plotOptions$piePastel rm(x) # Some setup vAlpha <- col2rgb(vertex.colors,TRUE)[4,] midX=numeric(0) midY=numeric(0) if (length(E$from)>0) { plotEdgeLabel <- sapply(1:length(E$from),function(i)(is.character(edge.labels[[i]]) | is.expression(edge.labels[[i]]) | is.call(edge.labels[[i]])) && !identical(edge.labels[[i]],'')) } else { plotEdgeLabel <- logical(0) } if (!(is.expression(edge.labels) | is.character(edge.labels) | is.list(edge.labels) )) edge.labels <- as.character(edge.labels) ### Open device: # Start output: if (is.function(filetype)) { filetype(width=width, height = height) filetype <- '' } else { if (filetype=='default') if (is.null(dev.list()[dev.cur()])) 
dev.new(rescale="fixed",width=width,height=height) if (filetype=='R') dev.new(rescale="fixed",width=width,height=height) # if (filetype=='X11' | filetype=='x11') x11(width=width,height=height) if (filetype=='eps') postscript(paste(filename,".eps",sep=""),height=height,width=width, horizontal=FALSE) if (filetype=='pdf') pdf(paste(filename,".pdf",sep=""),height=height,width=width) if (filetype=='tiff') tiff(paste(filename,".tiff",sep=""),units='in',res=res,height=height,width=width) if (filetype=='png') png(paste(filename,".png",sep=""),units='in',res=res,height=height,width=width) if (filetype=='jpg' | filetype=='jpeg') jpeg(paste(filename,".jpg",sep=""),units='in',res=res,height=height,width=width) if (filetype=="svg") { stop("filetype = 'svg' is no longer supported") # if (R.Version()$arch=="x64") stop("RSVGTipsDevice is not available for 64bit versions of R.") # if (!requireNamespace("RSVGTipsDevice", quietly = TRUE)) stop("Please install 'RSVGTipsDevice' package first.") # RSVGTipsDevice::devSVGTips(paste(filename,".svg",sep=""),width=width,height=height,title=filename) } if (filetype=="tex") { # # Special thanks to Charlie Sharpsteen for supplying these tikz codes on stackoverflow.com !!! 
# # if (!suppressPackageStartupMessages(require(tikzDevice,quietly=TRUE))) stop("tikzDevice must be installed to use filetype='tex'") # opt= c( # getOption('tikzLatexPackages'), # "\\def\\tooltiptarget{\\phantom{\\rule{1mm}{1mm}}}", # "\\newbox\\tempboxa\\setbox\\tempboxa=\\hbox{}\\immediate\\pdfxform\\tempboxa \\edef\\emptyicon{\\the\\pdflastxform}", # "\\newcommand\\tooltip[1]{\\pdfstartlink user{/Subtype /Text/Contents (#1)/AP <>}\\tooltiptarget\\pdfendlink}" # ) # # place_PDF_tooltip <- function(x, y, text) # { # # # Calculate coordinates # tikzX <- round(grconvertX(x, to = "device"), 2) # tikzY <- round(grconvertY(y, to = "device"), 2) # # Insert node # tikzAnnotate(paste( # "\\node at (", tikzX, ",", tikzY, ") ", # "{\\tooltip{", text, "}};", # sep = '' # )) # invisible() # } # # print("NOTE: Using 'tex' as filetype will take longer to run than other filetypes") # # tikzDevice:::tikz(paste(filename,".tex",sep=""), standAlone = standAlone, width=width, height=height, packages=opt) stop("filetype = 'tex' is no longer supported") } } ### START PLOT: marOrig <- par("mar") bgOrig <- par("bg") if (plot) { if (!noPar) par(mar=c(0,0,0,0), bg=background) plot(1, ann = FALSE, axes = FALSE, xlim = c(-1 - mar[2], 1 + mar[4] + (((legend&is.null(scores))|(filetype=="svg")) * (2+mar[2]+mar[4])/GLratio)), ylim = c(-1 - mar[1] ,1 + mar[3]),type = "n", xaxs = "i", yaxs = "i") # plot(1, ann = FALSE, axes = FALSE, xlim = c(-1 - mar[2], 1 + mar[4] + (((legend&is.null(scores))) * 2.4/GLratio)), ylim = c(-1 - mar[1] ,1 + mar[3]),type = "n", xaxs = "i", yaxs = "i") } # Run preExpression if (!is.null(preExpression)) { eval(parse(text = preExpression)) } # if (PlotOpen) # { width <- par('pin')[1] height <- par('pin')[2] if (rescale & aspect) { l <- original.layout # center: l[,1] <- l[,1] - mean(range(l[,1])) l[,2] <- l[,2] - mean(range(l[,2])) # Ajust for aspect: l[,1] <- l[,1] * min(height/width, 1) l[,2] <- l[,2] * min(width/height, 1) lTemp <- l if (length(unique(lTemp[,1]))>1) { 
l[,1]=(lTemp[,1]-min(lTemp))/(max(lTemp)-min(lTemp))*2-1 } else l[,1] <- 0 if (length(unique(lTemp[,2]))>1) { l[,2]=(lTemp[,2]-min(lTemp))/(max(lTemp)-min(lTemp))*2-1 } else l[,2] <- 0 # center again for good measures! (I really have no idea why but whatever): l[,1] <- l[,1] - mean(range(l[,1])) l[,2] <- l[,2] - mean(range(l[,2])) rm(lTemp) # # Equalize white space: # if (diff(range(l[,1])) < 2) # { # l[,1] <- diff(range(l[,1]))/2 + l[,1] # } # if (diff(range(l[,2])) < 2) # { # l[,2] <- (2-diff(range(l[,2])))/2 + l[,2] # } layout <- l } # Rescale dims: if (pty=='s') { width=height=min(c(width,height)) } # } if (legend) { width <- width * (GLratio/(1+GLratio)) } # Super cool background: if (is.logical(bg)) if (bg) { colarray=array(dim=c(bgres,bgres,length(groups))) seq=seq(-1.2,1.2,length=bgres+1) for (G in 1:length(groups)) { Xg=layout[groups[[G]],1] Yg=layout[groups[[G]],2] for (i in 1:bgres) { for (j in 1:bgres) { Xp=mean(seq[i:(i+1)]) Yp=mean(seq[j:(j+1)]) colarray[i,j,G]=min(sqrt( (Xp-Xg)^2 + (Yp-Yg)^2)) }}} colarray=((2.2-colarray)/2.2)^bgcontrol colarray2=array(dim=c(3,bgres,bgres)) # } # # if (is.logical(bg)) if (bg){ # for (i in 1:bgres) { for (j in 1:bgres) { for (C in 1:3) { colarray2[C,i,j]=min(c(1,max(colarray[i,j,]*(col2rgb(color)[C,]/255)))) } polygon(c(seq[i],seq[i+1],seq[i+1],seq[i]),c(seq[j],seq[j],seq[j+1],seq[j+1]), col=rgb(colarray2[1,i,j],colarray2[2,i,j],colarray2[3,i,j]),border=rgb(colarray2[1,i,j],colarray2[2,i,j],colarray2[3,i,j])) } } } # Compute normalizing constant: if (isTRUE(normalize)) { normC <- sqrt(sum(par("pin")^2)) / sqrt(7^2 + 7^2) vsize <- vsize * normC vsize2 <- vsize2 * normC edge.width <- edge.width * normC border.width <- border.width * normC asize <- asize * normC edge.label.cex <- edge.label.cex * normC knot.size <- knot.size * normC knot.border.width <- knot.border.width * normC residScale <- residScale * normC } ## Normalize curve (linear to half of diagonal in user coordinates): if (isTRUE(curveScale)) { if 
(isTRUE(curveScaleNodeCorrection)){ curveScaleNodeCorrection <- nNodes } else curveScaleNodeCorrection <- 1 usr <- par("usr") AverageLength <- sqrt(((usr[2]-usr[1]) * (usr[4]-usr[3])) / curveScaleNodeCorrection) EdgeLenghts <- sqrt((layout[E$to,1] - layout[E$from,1])^2 + (layout[E$to,2] - layout[E$from,2])^2) curve <- curve * EdgeLenghts /AverageLength } ##### MAKE SUBPLOTS FOR PIE CHARTS ##### # if (drawPies){ # # Parse expressions: # subplots <- mapply(width = border.width, bg = vertex.colors, x = pie, R = pieRadius, bord = pieBorder, col1 = pieColor, col2 = pieColor2, FUN = function(width, bg, x, R, bord, col1, col2){ # parse(text=paste0('qgraph:::pie2(x=',x,', label="", radius=',R ,', pie.bord=',bord,', pie.col = "',col1,'", pie.col2 = "',col2,'", # bg = "',bg,'", border.width = ',width,')')) # }, SIMPLIFY = FALSE) # # } # Create 'omitEdge' vector to make sure bidirectional edges are not plotted. if (any(bidirectional)) { omitEdge <- duplicated(srt)&bidirectional } else omitEdge <- NULL # If images is not NULL, replace subplots with images calls: if (!is.null(images)) { images <- gsub("\\\\","/", images) if (length(images) == 1) images <- rep(images, nNodes) if (is.null(subplots)) subplots <- vector( "list", nNodes) for (i in seq_along(images)) { if (!is.na(images[i]) && file.exists(images[[i]])) { if (grepl("\\.jpe?g$",images[i])) { subplots[[i]] <- parse(text=sprintf(' plot(1,type="n",xlim=0:1,ylim=0:1,axes=FALSE,xlab="",ylab="",bty="n",xaxs="i",yaxs="i") rasterImage(readJPEG("%s"), 0,0,1,1, interpolate=FALSE)', images[i])) } else if (grepl("\\.png$",images[i])) { subplots[[i]] <- parse(text=sprintf(' plot(1,type="n",xlim=0:1,ylim=0:1,axes=FALSE,xlab="",ylab="",bty="n",xaxs="i",yaxs="i") rasterImage(readPNG("%s"), 0,0,1,1, interpolate=FALSE)', images[i])) } else warning("Only jpeg and png images supported in 'images'") } } } # Set non-rectangular/square dge shapes with subplots to square: # if (!is.null(subplots)) # { # # Get which nodes become a subplot: # 
whichsub <- which(sapply(subplots,function(x)is.expression(x)|is.function(x))) # # shape[whichsub][!shape[whichsub]%in%c("square","rectangle")] <- "square" # } # Plot edges: if (length(curve)==1) curve=rep(curve,length(edgesort)) curve[E$from==E$to]=1 # Compute knot placement: if (length(knots) > 0) { knotLayout <- matrix(,max(knots),2) for (i in seq_len(max(knots))) { knotNodes <- c(E$from[knots==i],E$to[knots==i]) # mid X: knotLayout[i,1] <- in2usrX(mean(usr2inX(layout[knotNodes,1]))) knotLayout[i,2] <- in2usrY(mean(usr2inY(layout[knotNodes,2]))) } } else { knotLayout <- matrix(,0,0) } # For each (sorted from weak to strong) edge: for (i in edgesort) { # Only plot if over minimum: if (abs(E$weight[i])>minimum & !isTRUE(omitEdge[i])) { x1=layout[E$from[i],1] x2=layout[E$to[i],1] y1=layout[E$from[i],2] y2=layout[E$to[i],2] # If not curved, knotted or XKCD plot straigth line instead of spline: if (curve[i]==0 & !XKCD & knots[i] == 0) { # Replace destination to fixed points if specified in edgeConnectPoints: if (!is.null(edgeConnectPoints) && !is.na(edgeConnectPoints[i,2])) { NewPoints <- Cent2Edge(x2,y2,edgeConnectPoints[i,2],vsize[E$to[i]],vsize2[E$to[i]],shape[E$to[i]],ifelse(residEdge[i],residScale,ifelse(XKCD,2,0)), polygonList) x2 <- NewPoints[1] y2 <- NewPoints[2] } else { # Replace destination of edge to edge of node if needed: # if (parallelEdge[i] | is.logical(arrows) | vAlpha[E$to[i]] < 255) if (parallelEdge[i] | (isTRUE(arrows) & directed[i]) | vAlpha[E$to[i]] < 255) { NewPoints <- Cent2Edge(x2,y2,ifelse(residEdge[i],loopRotation[E$to[i]],atan2usr2in(x1-x2,y1-y2) + parallelEdge[i]*parallelAngle[i]),vsize[E$to[i]],vsize2[E$to[i]],shape[E$to[i]],ifelse(residEdge[i],residScale,ifelse(XKCD,2,0)), polygonList) x2 <- NewPoints[1] y2 <- NewPoints[2] } } # Replace source to fixed points if specified in edgeConnectPoints: if (!is.null(edgeConnectPoints) && !is.na(edgeConnectPoints[i,1])) { NewPoints <- 
Cent2Edge(x1,y1,edgeConnectPoints[i,1],vsize[E$from[i]],vsize2[E$from[i]],shape[E$from[i]],ifelse(residEdge[i],residScale,ifelse(XKCD,2,0)), polygonList) x1 <- NewPoints[1] y1 <- NewPoints[2] } else { # Replace source of edge to edge of node if needed: if (parallelEdge[i] | plotEdgeLabel[i] || (any(E$from==E$to[i] & E$to==E$from[i]) & bidirectional[i]) | vAlpha[E$from[i]] < 255) { NewPoints <- Cent2Edge(x1,y1,ifelse(residEdge[i],loopRotation[E$from[i]],atan2usr2in(x2-x1,y2-y1) - parallelEdge[i]*parallelAngle[i]),vsize[E$from[i]],vsize2[E$from[i]],shape[E$from[i]],ifelse(residEdge[i],residScale,ifelse(XKCD,2,0)), polygonList) x1 <- NewPoints[1] y1 <- NewPoints[2] } } if (plotEdgeLabel[i]) { midX[i] <- ((1-edge.label.position[i])*x1 + edge.label.position[i]*x2) midY[i] <- ((1-edge.label.position[i])*y1 + edge.label.position[i]*y2) } ## Plot edges and arrows: drawEdge(c(x1,x2),c(y1,y2), col=edge.color[i], lwd=edge.width[i], arrowlwd=asize[i], lty=lty[i],directed=directed[i], bidirectional=any(E$from==E$to[i] & E$to==E$from[i]) & bidirectional[i], arrows=arrows, arrowAngle=arrowAngle, open=open) # # lines(c(x1,x2),c(y1,y2),lwd=edge.width[i],col=edge.color[i],lty=lty[i]) # if (directed[i]) # { # if (!is.logical(arrows)) # { # Ax=seq(x1,x2,length=arrows+2) # Ay=seq(y1,y2,length=arrows+2) # for (a in 1:arrows+1) # { # # qgraph.arrow(Ax[a],Ay[a],x1,y1,length=asize[i],angle=30*pi/180,lwd=max(edge.width[i]/2,1), # # col=edge.color[i],open=open,Xasp=width/height,lty=lty[i]) # DrawArrow(Ax[a],Ay[a],atan2usr2in(Ax[a]-x1,Ay[a]-y1),angle=arrowAngle,cex=asize[i],open=open,lwd=max(edge.width[i]/2,1),lty=lty[i],edge.color[i]) # } # } # else if (arrows) # { # # qgraph.arrow(x2,y2,x1,y1,length=asize[i],angle=30*pi/180,lwd=max(edge.width[i]/2,1), # # col=edge.color[i],open=open,Xasp=width/height,lty=lty[i]) # DrawArrow(x2,y2,atan2usr2in(x2-x1,y2-y1),angle=arrowAngle,cex=asize[i],open=open,lwd=max(edge.width[i]/2,1),lty=lty[i],edge.color[i]) # # if (any(E$from==E$to[i] & 
E$to==E$from[i]) & bidirectional[i]) # { # # qgraph.arrow(x1,y1,x2,y2,length=asize[i],angle=30*pi/180,lwd=max(edge.width[i]/2,1), # # col=edge.color[i],open=open,Xasp=width/height,lty=lty[i]) # DrawArrow(x1,y1,atan2usr2in(x1-x2,y1-y2),angle=arrowAngle,cex=asize[i],open=open,lwd=max(edge.width[i]/2,1),lty=lty[i],edge.color[i]) # } # } # } } else { if (E$from[i]==E$to[i]) { # loopX=loop*3*(0.5*vsize[E$to[i]]*0.130*(7/width)*par("cin")[2]) # spx=c(x1+loopX,x1,x1-loopX) # loopY=loop*3*(0.5*vsize[E$to[i]]*0.130*(7/height)*par("cin")[2]) # spy=c(y1,y1+loopY,y1) # spl <- spl2 <- xspline(c(x1,spx,x2),c(y1,spy,y2),1,draw=FALSE) spl <- SelfLoop(x1,y1,loopRotation[E$from[i]],vsize[E$from[i]],vsize2[E$from[i]],shape[E$from[i]],residuals,residScale,polygonList,offset=ifelse(XKCD,2,0)) } else { #spx <- midx - curve[i] * (y2 - y1)/2 #spy <- midy + curve[i] * (x2 - x1)/2 # curvemid <- Cent2Edge(midx,midy,atan2usr2in(x2-x1,y2-y1)-sign(curve[i])*pi/2,abs(curve[i])*5*2,"circle") if (knots[i]!=0) { spl <- xspline(c(x1,knotLayout[knots[i],1],x2),c(y1,knotLayout[knots[i],2],y2),0,draw=FALSE) } else { # midx <- (x1 + x2)/2 # midy <- (y1 + y2)/2 curvemid <- PerpMid(c(x1,y1),c(x2,y2),cex=curve[i]) # Add pivots: if (is.numeric(curvePivot)) { splShape <- c(curveShape, curvePivotShape, curveShape, curvePivotShape, curveShape) curveQ1 <- PerpMid(c(x1,y1),c(x2,y2),cex=curve[i], q = curvePivot) curveQ2 <- PerpMid(c(x1,y1),c(x2,y2),cex=curve[i], q = 1-curvePivot) spx <- c(curveQ1[1] , curvemid[1], curveQ2[1]) spy <- c(curveQ1[2] , curvemid[2], curveQ2[2]) } else { splShape <- rep(curveShape,3) spx <- curvemid[1] spy <- curvemid[2] } spl=xspline(c(x1,spx,x2),c(y1,spy,y2),splShape,draw=FALSE) } } if (E$from[i]!=E$to[i]) { recurve <- FALSE # Replace source to fixed points if specified in edgeConnectPoints: if (!is.null(edgeConnectPoints) && !is.na(edgeConnectPoints[i,1])) { NewPoints <- 
Cent2Edge(x1,y1,edgeConnectPoints[i,1],vsize[E$from[i]],vsize2[E$from[i]],shape[E$from[i]],ifelse(residEdge[i],residScale,ifelse(XKCD,2,0)), polygonList) x1 <- NewPoints[1] y1 <- NewPoints[2] } # Replace destination to fixed points if specified in edgeConnectPoints: if (!is.null(edgeConnectPoints) && !is.na(edgeConnectPoints[i,2])) { NewPoints <- Cent2Edge(x2,y2,edgeConnectPoints[i,2],vsize[E$to[i]],vsize2[E$to[i]],shape[E$to[i]],ifelse(residEdge[i],residScale,ifelse(XKCD,2,0)), polygonList) x2 <- NewPoints[1] y2 <- NewPoints[2] recurve <- TRUE } # Replace destination of edge to edge of node if needed: # if (is.logical(arrows)| vAlpha[E$to[i]] < 255) # { if (parallelEdge[i] || isTRUE(arrows) & directed[i]| vAlpha[E$to[i]] < 255 | vAlpha[E$from[i]] < 255) { if (is.null(edgeConnectPoints) || is.na(edgeConnectPoints[i,2])) { NewPoints <- Cent2Edge(x2,y2,ifelse(residEdge[i],loopRotation[E$to[i]],atan2usr2in(spl$x[length(spl$x)-1]-x2,spl$y[length(spl$y)-1]-y2)) + parallelEdge[i]*parallelAngle[i],vsize[E$to[i]],vsize2[E$to[i]],shape[E$to[i]],ifelse(residEdge[i],residScale,ifelse(XKCD,2,0)), polygonList) x2 <- NewPoints[1] y2 <- NewPoints[2] recurve <- TRUE } if (is.null(edgeConnectPoints) || is.na(edgeConnectPoints[i,1])) { NewPoints <- Cent2Edge(x1,y1,ifelse(residEdge[i],loopRotation[E$from[i]],atan2usr2in(spl$x[2]-x1,spl$y[2]-y1)) - parallelEdge[i]*parallelAngle[i],vsize[E$from[i]],vsize2[E$from[i]],shape[E$from[i]],ifelse(residEdge[i],residScale,ifelse(XKCD,2,0)), polygonList) x1 <- NewPoints[1] y1 <- NewPoints[2] recurve <- TRUE } } # } if (recurve) { # # Update curve if needed: # if (isTRUE(curveScale)) # { # usr <- par("usr") # AverageLength <- sqrt(((usr[2]-usr[1]) * (usr[4]-usr[3])) / nNodes) # EdgeLenght <- sqrt((x2 - x1)^2 + (y2 - y1)^2) # curve[i] <- curve[i] * EdgeLenght /AverageLength # } if (knots[i]!=0) { spl <- xspline(c(x1,knotLayout[knots[i],1],x2),c(y1,knotLayout[knots[i],2],y2),0,draw=FALSE) } else { if (residEdge[i]) curvemid <- 
PerpMid(c(x1,y1),c(x2,y2),cex=curve[i]) # Add pivots: if (is.numeric(curvePivot)) { splShape <- c(curveShape, curvePivotShape, curveShape, curvePivotShape, curveShape) curvemid <- PerpMid(c(x1,y1),c(x2,y2),cex=curve[i]) curveQ1 <- PerpMid(c(x1,y1),c(x2,y2),cex=curve[i], q = curvePivot) curveQ2 <- PerpMid(c(x1,y1),c(x2,y2),cex=curve[i], q = 1-curvePivot) spx <- c(curveQ1[1] , curvemid[1], curveQ2[1]) spy <- c(curveQ1[2] , curvemid[2], curveQ2[2]) } else { spx <- curvemid[1] spy <- curvemid[2] } # Check if midpoint is not curved but indented:r tans <- atan2(x2-x1,y2-y1) - (atan2(spx-x1,spy-y1) + c(-2*pi,0,2*pi)) tans <- tans[which.min(abs(tans))] # if (sign(atan2(x2-x1,y2-y1) - atan2(spx-x1,spy-y1)) != sign(curve[i])) if (sign(tans) != sign(curve[i])) { spl <- list(x = seq(x1, x2, length=length(spx)+2), y = seq(y1, y2, length=length(spx)+2)) } else { spl=xspline(c(x1,spx,x2),c(y1,spy,y2),splShape,draw=FALSE) } } } } # If XKCD jitter edge: if (XKCD) { jitt <- xkcd_jitter(spl$x,spl$y) spl$x[3:(length(spl$x)-3)] <- jitt$x[3:(length(spl$x)-3)] spl$y[3:(length(spl$y)-3)] <- jitt$y[3:(length(spl$y)-3)] } # If XKCD extra white edge: if (XKCD) { lines(spl,lwd=edge.width[i]*2,col="white") } if (plotEdgeLabel[i]) { if (E$from[i] != E$to[i] && knots[i] == 0 && edge.label.position[i] == 0.5) { midX[i] <- curvemid[1] midY[i] <- curvemid[2] } else { midX[i]=spl$x[round(edge.label.position[i]*length(spl$x))] midY[i]=spl$y[round(edge.label.position[i]*length(spl$y))] } } ### Plot Edges and arrows: drawEdge(spl$x,spl$y, col=edge.color[i], lwd=edge.width[i], arrowlwd=asize[i], lty=lty[i],directed=directed[i], bidirectional=any(E$from==E$to[i] & E$to==E$from[i]) & bidirectional[i], arrows=arrows, arrowAngle=arrowAngle, open=open) # lines(spl,lwd=edge.width[i],col=edge.color[i],lty=lty[i]) # # if (directed[i]) # { # if (!is.logical(arrows)) # { # Ax=seq(1,length(spl$x),length=arrows+2) # Ay=seq(1,length(spl$y),length=arrows+2) # for (a in 2:(arrows+1)) # { # # 
qgraph.arrow(spl$x[Ax[a]+1],spl$y[Ay[a]+1],spl$x[Ax[a]],spl$y[Ay[a]],length=asize[i],angle=30*pi/180,lwd=max(edge.width[i]/2,1), # # col=edge.color[i],open=open,Xasp=width/height,lty=lty[i]) # # DrawArrow(spl$x[Ax[a]+1],spl$y[Ay[a]+1],atan2usr2in(spl$x[Ax[a]+1]-spl$x[Ax[a]],spl$y[Ay[a]+1]-spl$y[Ay[a]]),angle=arrowAngle,cex=asize[i],open=open,lwd=max(edge.width[i]/2,1),lty=lty[i],edge.color[i]) # } # } # else if (arrows) # { # # qgraph.arrow(spl$x[length(spl$x)],spl$y[length(spl$y)],spl$x[length(spl$x)-1],spl$y[length(spl$y)-1],length=asize[i],angle=30*pi/180,lwd=max(edge.width[i]/2,1), # # col=edge.color[i],open=open,Xasp=width/height,lty=lty[i]) # DrawArrow(spl$x[length(spl$x)],spl$y[length(spl$y)],atan2usr2in(spl$x[length(spl$x)]-spl$x[length(spl$x)-1],spl$y[length(spl$y)]-spl$y[length(spl$y)-1]),angle=arrowAngle,cex=asize[i],open=open,lwd=max(edge.width[i]/2,1),lty=lty[i],edge.color[i]) # # if (any(E$from==E$to[i] & E$to==E$from[i]) & bidirectional[i]) # { # # qgraph.arrow(spl$x[1],spl$y[1],spl$x[2],spl$y[2],length=asize[i],angle=30*pi/180,lwd=max(edge.width[i]/2,1), # # col=edge.color[i],open=open,Xasp=width/height,lty=lty[i]) # DrawArrow(spl$x[1],spl$y[1],atan2usr2in(spl$x[1]-spl$x[2],spl$y[1]-spl$y[2]),angle=arrowAngle,cex=asize[i],open=open,lwd=max(edge.width[i]/2,1),lty=lty[i],edge.color[i]) # } # } # # } } } } # Plot knots: if (any(knots>0)) { if (length(knot.size)==1) knot.size <- rep(knot.size,length=max(knots)) if (length(knot.color)==1) knot.color <- rep(knot.color,length=max(knots)) if (length(knot.borders)==1) knot.borders <- rep(knot.borders,length=max(knots)) if (length(knot.border.color)==1) knot.border.color <- rep(knot.border.color,length=max(knots)) if (length(knot.border.color)==1) knot.border.color <- rep(knot.border.width,length=max(knots)) for (i in 1:max(knots)) if (is.na(knot.color[i])) knot.color[i] <- mixCols(edge.color[knots==i]) if (any(knot.borders)) { for (i in 1:max(knots)) { 
points(knotLayout[i,1],knotLayout[i,2],cex=knot.size[i],col=knot.color[i],pch=16) if (knot.borders[i]) points(knotLayout[i,1],knotLayout[i,2],cex=knot.size[i],col=knot.border.color[i],pch=1) } } else points(knotLayout[,1],knotLayout[,2],cex=knot.size,col=knot.color,pch=16) } # Edge labels if (is.null(ELcolor)) { ELcolor <- edge.color } if (!is.logical(edge.labels) & length(edge.labels)>0) { # Fix midpoints for knots: for (i in seq_len(max(knots))) { midX[knots==i] <- knotLayout[i,1] midY[knots==i] <- knotLayout[i,2] } edgesort2 <- edgesort[abs(E$weight[edgesort])>minimum] edgesort2 <- edgesort2[!(duplicated(srt[edgesort2,,drop=FALSE])&bidirectional[edgesort2]) & (!duplicated(knots[edgesort2])|knots[edgesort2]==0)] if (length(edge.label.cex)==1) edge.label.cex <- rep(edge.label.cex,length(E$from)) if (plotELBG) { for (i in edgesort2) { # if (((is.character(edge.labels[[i]]) | is.expression(edge.labels[[i]]) | is.call(edge.labels[[i]])) && !identical(edge.labels[[i]],'')) || length(edge.labels) == 0) #if ((is.character(edge.labels[[i]]) | is.expression(edge.labels[[i]]) | is.call(edge.labels[[i]])) && !identical(edge.labels[[i]],'')) if (plotEdgeLabel[i]) { labwd <- strwidth(edge.labels[[i]],cex=edge.label.cex[i]) labht <- strheight(edge.labels[[i]],cex=edge.label.cex[i]) marEL <- edge.label.margin[i] polygon(c(midX[i]-labwd/2-marEL,midX[i]+labwd/2+marEL,midX[i]+labwd/2+marEL,midX[i]-labwd/2-marEL), c(midY[i]-labht/2-marEL,midY[i]-labht/2-marEL,midY[i]+labht/2+marEL,midY[i]+labht/2+marEL), border=NA, col=edge.label.bg[i]) } } } if (!is.list(edge.labels)) { text(midX[edgesort2],midY[edgesort2],edge.labels[edgesort2],cex=edge.label.cex[edgesort2],col=ELcolor[edgesort2], font = edge.label.font[edgesort2], adj = c(0.5, 0.5)) } else { for (i in edgesort2) { text(midX[i],midY[i],edge.labels[[i]],font=edge.label.font[i],cex=edge.label.cex[i],col=ELcolor[i], adj = c(0.5, 0.5)) } } } #if (nNodes==1) layout=matrix(0,1,2) # Plot nodes: # scale border width: # border.width <- 
border.width * normC # If usePCH = NULL, detect if device or resizable R plot: if (is.null(usePCH)) usePCH <- grepl("(RStudioGD)|(x11)|(X11)|(quartz)|(windows)",dev.cur(), ignore.case = TRUE) if (!XKCD) { # Check if nodes need to be plotted in for loop: if (!usePCH || !is.null(subplots) || any(shape=="rectangle") || !all(shape %in% c("circle","square","triangle","diamond")) || any(sapply(bars,length) > 0) & !all(is.na(means)) & !all(is.na(SDs))) { # Get which nodes become a subplot: # whichsub <- which(sapply(subplots,function(x)is.expression(x)|is.function(x))) # # Plot normal nodes: # bordVec <- unlist(lapply(order(vsize*vsize2,decreasing=FALSE),function(x)rep(x,1+borders[x]))) # bordVec <- bordVec[!bordVec%in%whichsub] # points(layout[bordVec,],cex=vsize[bordVec],col=ifelse(duplicated(bordVec),bcolor[bordVec],vertex.colors[bordVec]),lwd=border.width,pch=ifelse(duplicated(bordVec),pch2[bordVec],pch1[bordVec])) for (i in order(vsize*vsize2,decreasing=TRUE)) { x <- layout[i,1] y <- layout[i,2] if (isTRUE(is.expression(subplots[[i]]))) { xOff <- Cent2Edge(x,y,pi/2,vsize[i],vsize2[i],shape[i], offset=0, polygonList=polygonList)[1] - x yOff <- Cent2Edge(x,y,0,vsize[i],vsize2[i],shape[i], offset=0, polygonList=polygonList)[2] - y usr <- par("usr") # Plot background: rect(max(usr[1],x-xOff),max(usr[3],y-yOff),min(usr[2],x+xOff),min(usr[4],y+yOff),col=subplotbg,border=NA) # Plot subplot: subplot(eval(subplots[[i]],envir=globalenv()),c(max(usr[1],x-xOff),min(usr[2],x+xOff)), c(max(usr[3],y-yOff),min(usr[4],y+yOff)), pars = subpars) # Plot border: if (borders[i]) rect(x-xOff,y-yOff,x+xOff,y+yOff,border=bcolor[i],lwd=border.width[i]) } else { drawNode(x, y, shape[i], vsize[i], vsize2[i], borders[i], vertex.colors[i], bcolor[i], border.width[i], polygonList, bars[[i]], barSide[i], barColor[i], barLength[i], barsAtSide, usePCH = usePCH, resolution = node.resolution, noPar = noPar, bw = bw, density = density[i], angle = angle[i], 
mean=means[i],SD=SDs[i],meanRange=meanRange,pie=pie[[i]],pieColor=pieColor[[i]],pieColor2=pieColor2[[i]], pieBorder=pieBorder[[i]],pieStart=pieStart[[i]],pieDarken=pieDarken[[i]],pastel=piePastel, rainbowStart=rainbowStart,equalPieColor = pieCIs) } } } else { pch1=numeric(0) pch2=numeric(0) for (i in 1:length(shape)) { if (shape[i]=="circle") { pch1[i]=16 pch2[i]=1 } if (shape[i]=="square") { pch1[i]=15 pch2[i]=0 } if (shape[i]=="triangle") { pch1[i]=17 pch2[i]=2 } if (shape[i]=="diamond") { pch1[i]=18 pch2[i]=5 } if (!shape[i]%in%c("circle","square","triangle","diamond")) stop(paste("Shape",shape[i],"is not supported")) } bordVec <- unlist(lapply(order(vsize,decreasing=FALSE),function(x)rep(x,1+borders[x]))) points(layout[bordVec,],cex=vsize[bordVec],col=ifelse(duplicated(bordVec),bcolor[bordVec],vertex.colors[bordVec]),lwd=border.width[bordVec],pch=ifelse(duplicated(bordVec),pch2[bordVec],pch1[bordVec])) } # points(layout,cex=vsize,col=vertex.colors,pch=pch1) # # if (any(borders) & nNodes > 1) points(layout[borders,],cex=vsize[borders],lwd=border.width,pch=pch2[borders],col=bcolor[borders]) # # if (any(borders) & nNodes == 1) points(layout,cex=vsize[borders],lwd=border.width,pch=pch2[borders],col=bcolor[borders]) } else { circ <- seq(0,2*pi,length=100) for (i in 1:nNodes) { pts <- lapply(circ,function(r)Cent2Edge(layout[i,1],layout[i,2],r,vsize[i],vsize2[i],shape[i],0,polygonList)) mod <- xkcd_jitter(sapply(pts,'[',1),sapply(pts,'[',2),2000) if (borders[i]) { polygon(mod$x,mod$y,border="white",col=NA,lwd=10) polygon(mod$x,mod$y,border="black",col=vertex.colors[i],lwd=5) } else { polygon(mod$x,mod$y,border="white",col=NA,lwd=10) polygon(mod$x,mod$y,border=NULL,col=vertex.colors[i],lwd=5) } } } if (any(labels != '')) { # labels=as.character(labels) # Vertex label symbols: # Set symbol font: # if (is.character(labels)) # { # strsplV=strsplit(labels,"") # greekV=logical(0) # for (i in 1:length(strsplV)) # { # greekV[i]=any(strsplV[[i]]=="*") # 
labels[i]=paste(strsplV[[i]][which(strsplV[[i]]!="*")],collapse="") # } # V.font=rep(1,length(E$from)) # V.font[greekV]=5 # } else V.font <- 1 if (is.null(label.cex)) label.cex <- pmax(1,vsize) # Rescale labels: if (label.scale) { ones <- rep(1, nNodes) VWidths <- sapply(mapply(Cent2Edge,cex=vsize,cex2=vsize2,shape=shape,MoreArgs=list(x=0,y=0,r=pi/2,polygonList=polygonList, noPar = noPar),SIMPLIFY=FALSE),'[',1) * 2 VHeights <- sapply(mapply(Cent2Edge,cex=vsize,cex2=vsize2,shape=shape,MoreArgs=list(x=0,y=0,r=0,polygonList=polygonList, noPar = noPar),SIMPLIFY=FALSE),'[',2) * 2 # LWidths <- pmax(sapply(label.cex,function(x)strwidth(label.norm,cex=x)),mapply(strwidth, s=labels, cex=label.cex)) # LHeights <- pmax(sapply(label.cex,function(x)strheight(label.norm,cex=x)),mapply(strheight, s=labels, cex=label.cex)) # LWidths <- pmax(sapply(ones,function(x)strwidth(label.norm,cex=x)),mapply(strwidth, s=labels, cex=ones)) LHeights <- pmax(sapply(ones,function(x)strheight(label.norm,cex=x)),mapply(strheight, s=labels, cex=ones)) label.cex <- label.cex * label.prop * pmin((VWidths*label.fill.horizontal)/LWidths,(VHeights*label.fill.vertical)/LHeights) # label.cex[nchar(labels)>1]=label.cex[nchar(labels)>1]*2/nchar(labels[nchar(labels)>1],"width") # Equalize: if (!identical(label.scale.equal,FALSE)){ if (isTRUE(label.scale.equal)){ label.scale.equal <- rep(1,length(label.cex)) } label.cex[] <- ave(label.cex,label.scale.equal,FUN=min) } } # Plot labels: if (!is.list(labels)) { # text(layout[,1],layout[,2],labels,cex=label.cex,col=lcolor,font=label.font, adj = c(0.5, 0.5)) text(layout[,1],layout[,2],labels,cex=label.cex,col=lcolor,font=label.font, adj=node.label.offset, pos=node.label.position) } else { lcolor <- rep(lcolor,length=nNodes) for (i in seq_along(labels)) { # text(layout[i,1],layout[i,2],labels[[i]],cex=label.cex[i],col=lcolor[i],font=label.font[i], adj = c(0.5, 0.5)) text(layout[i,1],layout[i,2],labels[[i]],cex=label.cex[i],col=lcolor[i],font=label.font[i], 
adj=node.label.offset, pos=node.label.position) } } } # # if (!is.null(tooltips)) # { # # Set Tooltips: # for (i in 1:nNodes) # { # if (!is.na(tooltips[i])) # { # if (filetype=='svg') RSVGTipsDevice::setSVGShapeToolTip(desc=tooltips[i]) # } # if (!is.null(SVGtooltips)) if (!is.na(SVGtooltips[i])) # { # RSVGTipsDevice::setSVGShapeToolTip(desc=SVGtooltips[i]) # } # NodeOutline <- lapply(seq(0,2*pi,length=10),function(r)Cent2Edge(layout[i,1],layout[i,2],r,vsize[i],vsize2[i],shape[i],offset=0,polygonList)) # polygon(sapply(NodeOutline,'[',1),sapply(NodeOutline,'[',2),col="#01010101",border=NA) # # } # } ### CIRCLES AT END EDGES (RANDOM INTERCEPTS) ### if (any(CircleEdgeEnd)) { for (i in which(CircleEdgeEnd)) { if (abs(E$weight[i]) > minimum) { # Center of destination node: x <- layout[E$to[i],1] y <- layout[E$to[i],2] # Edge entry point: if (!is.null(edgeConnectPoints) && !is.na(edgeConnectPoints[i,2])) { edge <- Cent2Edge(x,y,edgeConnectPoints[i,2],vsize[E$to[i]],vsize2[E$to[i]],shape[E$to[i]],offset=0, polygonList) } else { r <- atan2usr2in(layout[E$from[i],1] - x, layout[E$from[i],2] - y ) edge <- Cent2Edge(x,y,r,vsize[E$to[i]],vsize2[E$to[i]],shape[E$to[i]],offset=0, polygonList) } # Size of bal: sizeBal <- mean(vsize[E$to[i]],vsize2[E$to[i]]) / 4 # Center of bal: r <- atan2usr2in(x-edge[1], y-edge[2] ) ball <- Cent2Edge(edge[1],edge[2],r,sizeBal,sizeBal,'circle',offset=0, polygonList) # Draw ball: points(ball[1], ball[2], pch = 16, col = edge.color[i], cex = sizeBal) } } } ### Overlay: # if (overlay) # { # # Transparance in vertex colors: # num2hex <- function(x) # { # hex=unlist(strsplit("0123456789ABCDEF",split="")) # return(paste(hex[(x-x%%16)/16+1],hex[x%%16+1],sep="")) # } # # colHEX <- rgb(t(col2rgb(color)/255)) # # fillCols <- paste(sapply(strsplit(colHEX,split=""),function(x)paste(x[1:7],collapse="")),num2hex(25),sep="") # # for (i in 1:length(groups)) # { # if (length(groups[[i]]) > 1) # { # 
polygon(ellipse(cov(layout[groups[[i]],,drop=FALSE]),centre=colMeans(layout[groups[[i]],,drop=FALSE]),level=overlaySize),border=color[i],col=fillCols[i]) # } # } # } # if (is.null(names(groups))) names(groups) <- LETTERS[1:length(groups)] #if (!legend && filetype=="svg") plot(1, ann = FALSE, axes = FALSE, xlim = c(-1, 1), ylim = c(-1 ,1 ),type = "n", xaxs = "i", yaxs = "i") # Plot Legend: if (legend) { if (is.null(scores)) { legend.cex=legend.cex*2 #plot(1, ann = FALSE, axes = FALSE, xlim = c(-1, 1), ylim = c(-1 ,1 ),type = "n", xaxs = "i", yaxs = "i") if (mode=="sig") { if (legend.mode == "names") { text(1 + mar[4] ,0, paste(labels,": ",nodeNames,sep="",collapse="\n"), cex=legend.cex, adj = c(0, 0.5)) } else { if (length(groups) > 1) { legend (1 + mar[4] + 0.5 * 2.4/GLratio,0, names(groups), col= color ,pch = 19, xjust=0.5, yjust=0.5, cex=legend.cex, bty='n') legend (1 + mar[4] + 0.5 * 2.4/GLratio,0, names(groups), col= "black" ,pch = 1, xjust=0.5, ,yjust=0.5, cex=legend.cex, bty='n') } } if (gray) { legend(1 + mar[4] + 0.5 * 2.4/GLratio,(length(groups) > 1) * -0.5,paste("p <",alpha[length(alpha):1]), col = c(rgb(0.7,0.7,0.7),rgb(0.5,0.5,0.5),rgb(0.3,0.3,0.3),"black")[(5-length(alpha)):4], lty=1, xjust=0.5, yjust=0.5, cex=legend.cex, bty='n') } else { if (any(Pvals < 0)) { legend(1 + mar[4] + 0.25 * 2.4/GLratio,(length(groups) > 1) * -0.5,paste("p <",alpha[length(alpha):1]), col = c("cadetblue1","#6495ED","blue","darkblue")[(5-length(alpha)):4], lty=1, xjust=0.5, yjust=0.5, cex=legend.cex, bty='n') legend(1 + mar[4] + 0.75 * 2.4/GLratio,(length(groups) > 1) * -0.5,paste("p <",alpha[length(alpha):1]), col = c(rgb(1,0.8,0.4) ,"orange","darkorange","darkorange2")[(5-length(alpha)):4], lty=1, xjust=0.5, yjust=0.5, cex=legend.cex, bty='n') } else { legend(1 + mar[4] + 0.5 * 2.4/GLratio,(length(groups) > 1) * -0.5,paste("p <",alpha[length(alpha):1]), col = c("cadetblue1","#6495ED","blue","darkblue")[(5-length(alpha)):4], lty=1, xjust=0.5, yjust=0.5, cex=legend.cex, 
bty='n') } } } else { if (legend.mode == "style2"){ # Generate names in list: LEGENDgroups <- lapply(groups,function(x)paste0(labels[x],": ",nodeNames[x])) LEGENDstr <- character(0) LEGENDcol <- character(0) LEGENDpch <- numeric(0) LEGENDtextfont <- numeric(0) for (GR in seq_along(groups)){ LEGENDstr <- c(LEGENDstr,names(groups)[GR],LEGENDgroups[[GR]]) LEGENDcol <- c(LEGENDcol,rep(color[GR],length(LEGENDgroups[[GR]])+1)) LEGENDpch <- c(LEGENDpch,16,rep(1,length(LEGENDgroups[[GR]]))) LEGENDtextfont <- c(LEGENDtextfont,2,rep(1,length(LEGENDgroups[[GR]]))) } legend (1.2 + 0.5 * 2.4/GLratio,0,LEGENDstr, col= LEGENDcol ,pch = LEGENDpch, text.font = LEGENDtextfont, xjust=0.5, yjust=0.5, cex=legend.cex, bty='n') } else if (legend.mode == "style1"){ # Generate names in list: LEGENDgroups <- lapply(groups,function(x)paste0(labels[x],": ",nodeNames[x])) LEGENDstr <- character(0) LEGENDcol <- character(0) LEGENDbord <- character(0) LEGENDpch <- numeric(0) LEGENDtextfont <- numeric(0) getShape <- function(x, border=FALSE){ sapply(x,function(xx){ if (xx == "circle"){ return(16) } else if (xx == "square"){ return(15) } else if (xx == "triangle"){ return(17) } else return(16) }) } for (GR in seq_along(groups)){ LEGENDstr <- c(LEGENDstr,names(groups)[GR],LEGENDgroups[[GR]],"") LEGENDcol <- c(LEGENDcol,NA,rep(color[GR],length(LEGENDgroups[[GR]])),NA) LEGENDbord <- c(LEGENDbord,NA,bcolor[groups[[GR]]],NA) LEGENDpch <- c(LEGENDpch,NA,getShape(shape[groups[[GR]]]),NA) LEGENDtextfont <- c(LEGENDtextfont,2,rep(1,length(LEGENDgroups[[GR]])),NA) } legend (1.2 + 0.5 * 2.4/GLratio,0,LEGENDstr, col= LEGENDcol ,pch = LEGENDpch, text.font = LEGENDtextfont, xjust=0.5, yjust=0.5, cex=legend.cex, bty='n') legend (1.2 + 0.5 * 2.4/GLratio,0,LEGENDstr, col= LEGENDbord ,pch = LEGENDpch-15, text.font = LEGENDtextfont, xjust=0.5, yjust=0.5, cex=legend.cex, bty='n') } else if (legend.mode == "names") { text(1 + mar[4] ,0, paste(labels,": ",nodeNames,sep="",collapse="\n"), cex=legend.cex, adj = c(0, 
0.5)) } else { legend (1.2 + 0.5 * 2.4/GLratio,0, names(groups), col= color ,pch = 19, xjust=0.5, yjust=0.5, cex=legend.cex, bty='n') legend (1.2 + 0.5 * 2.4/GLratio,0, names(groups), col= "black" ,pch = 1, xjust=0.5, ,yjust=0.5, cex=legend.cex, bty='n') } } } if (!is.null(scores)) { plot(1, ann = FALSE, axes = FALSE, xlim = c(-0.5, scores.range[2]-scores.range[1]+8), ylim = c(0.5, length(groups)+2), type = "n", xaxs = "i", yaxs = "i") for (i in 1:length(groups)) { groupcols="white" groupcolors=1-t(col2rgb(color[i])/255) c=1 for (j in (scores.range[1]:scores.range[2]-scores.range[1])/(scores.range[2]-scores.range[1])) { groupcols[c]=rgb(1-j*groupcolors) c=c+1 } for (j in scores.range[1]:scores.range[2]-scores.range[1]) { polygon(c(j,j,j+1,j+1),c(i+0.05,i+0.95,i+0.95,i+0.05),col=groupcols[j+1],border=bcolor[i],lwd=2) } text(j+1.5,i+0.5,names(groups)[i],pos=4) } for (i in scores.range[1]:scores.range[2]-scores.range[1]) text(i+0.5,length(groups)+1.5,i+scores.range[1]) } } # Plot details: if (details & weighted) { if (cut != 0) text(0,-1.1,paste("Cutoff:",round(cut,2)),cex=0.6) if (minimum != 0) text(-1,-1.1,paste("Minimum:",round(minimum,2)),pos=4,cex=0.6) text(1,-1.1,paste("Maximum:",round(maximum,2)),pos=2,cex=0.6) } # plot title: if (!is.null(title)) { addTitle(title,cex = title.cex) } # Run postExpression if (!is.null(postExpression)) { eval(parse(text = postExpression)) } if (filetype%in%c('pdf','png','jpg','jpeg','svg','eps','tiff','tex')) { message(paste("Output stored in ",getwd(),"/",filename,".",filetype,sep="")) dev.off() } if (!noPar) par(mar=marOrig, bg=bgOrig) }qgraph/R/logGaus.R0000644000176200001440000000145314430573263013473 0ustar liggesusers# Simply computes the Gaussian log likelihood given sample covariance and estimate of precision: # Original: # logGaus <- function(S,K,n) # { # SK = S %*% K # tr = function(A) sum(diag(A)) # n/2 * (log(det(K)) - tr(SK)) # } ## According to huge??? 
# Gaussian log-likelihood of a GGM (up to an additive constant):
#   (n/2) * ( log det(K) - trace(K %*% S) )
# Args:
#   S - sample variance-covariance matrix
#   K - estimate of the precision (inverse covariance) matrix
#   n - sample size
# NOTE: trace(K %*% S) == trace(S %*% K); the commented lines are the
# earlier, equivalent formulation kept for reference.
logGaus <- function(S,K,n)
{
  KS = K %*% S
  # SK = S %*% K
  tr = function(A) sum(diag(A))
  return(n/2 * (log(det(K)) - tr(KS)) )
  # return(n/2 * (log(det(K)) - tr(SK)) )
}

# Computes the EBIC:
# Extended Bayesian Information Criterion of a GGM given sample covariance S,
# precision estimate K and sample size n.
# Args:
#   gamma         - EBIC hyperparameter (gamma = 0 reduces to ordinary BIC)
#   E             - number of free parameters; when missing it is counted as
#                   the number of nonzero entries in the lower triangle of K
#   countDiagonal - when E is missing, should diagonal entries count too?
EBIC <- function(S,K,n,gamma = 0.5,E,countDiagonal=FALSE)
{
  # browser()
  L <- logGaus(S, K, n)
  if (missing(E)){
    E <- sum(K[lower.tri(K,diag=countDiagonal)] != 0)
    # E <- sum(abs(K[lower.tri(K,diag=countDiagonal)]) > sqrt(.Machine$double.eps))
  }
  p <- nrow(K)
  # return EBIC:
  -2 * L + E * log(n) + 4 * E * gamma * log(p)
}
qgraph/R/ggmFit.R0000644000176200001440000002104114430573263013302 0ustar liggesusers
# Pretty-print helper for fit measures: tiny positive values are shown as
# "< 0.0001"; other values are formatted with a data-dependent number of
# significant digits (two extra digits for non-integers).
goodNum <- function(x){
  sapply(x,function(xx){
    if (xx < 0.0001 & xx > 0){
      return("< 0.0001")
    }
    digits <- max(0,floor(log10(abs(xx))) + 1)
    isInt <- xx%%1 == 0
    gsub("\\.$","",formatC(signif(unlist(xx),digits=digits+(!isInt)*2), digits=digits+(!isInt)*2,format="fg", flag="#"))
  })
}

# Computes fit measures of a GGM
ggmFit <- function(
  pcor, # pcor matrix or qgraph object
  covMat, # sample variance-covariance matrix
  sampleSize, # sample size (number of observations)
  refit = TRUE, # Refit the model in glasso without LASSO?
ebicTuning = 0.5, # EBIC hyperparameter used for the 'ebic' fit measure
  nPar, # Number of parameters, used for more general fit
  invSigma, # inverse variance covariance matrix instead of pcor, used for more general fit
  tol = sqrt(.Machine$double.eps), # values of |invSigma| below tol are set to zero
  verbose = TRUE,
  countDiagonalPars = TRUE # count diagonal elements of invSigma as parameters?
){
  # Only lavaan mimicry is currently implemented (covariances divided by N,
  # chi-square scaled by N rather than N-1); the N-1 branch below is retained
  # for a possible future 'mimic' argument.
  mimic <- "lavaan"
  if (missing(covMat)){
    stop("Argument 'covMat' may not be missing.")
  }
  if (missing(sampleSize)){
    stop("Argument 'sampleSize' may not be missing.")
  }
  # mimic <- match.arg(mimic)
  # If mimic is lavaan, rescale covmat:
  # Scale to divide by N:
  if (mimic == "lavaan"){
    covMat <- covMat * (sampleSize - 1)/sampleSize
  }
  # Number of observations (constant used to scale fmin into a chi-square):
  if (mimic == "lavaan"){
    Ncons <- sampleSize
  } else {
    Ncons <- sampleSize - 1
  }
  # If both pcor and invSigma not missing, stop:
  if (missing(pcor) & missing(invSigma)){
    stop("'pcor' and 'invSigma' arguments can not both be missing.")
  }
  # If pcor missing, compute from invSigma:
  if (missing(pcor)){
    pcor <- qgraph::wi2net(invSigma)
  }
  if (is(pcor,"qgraph")){
    pcor <- getWmat(pcor)
  }
  pcor <- as.matrix(pcor)
  # Refit without LASSO penalty, constraining the zeroes of pcor:
  if (refit){
    if (verbose) message("Refitting network")
    if (!all(pcor[upper.tri(pcor)]!=0)){
      glassoRes <- suppressWarnings(glasso::glasso(covMat, 0, zero = which(as.matrix(pcor) == 0 & diag(nrow(pcor)) != 1, arr.ind=TRUE)))
    } else {
      glassoRes <- suppressWarnings(glasso::glasso(covMat, 0))
    }
    if (!missing(invSigma)){
      warning("'invSigma' is ignored when refit = TRUE")
    }
    invSigma <- glassoRes$wi
  } else {
    # If missing invSigma, compute from pcor:
    if (missing(invSigma)){
      # If qgraph object, extract:
      if (is(pcor,"qgraph")){
        pcor <- qgraph::getWmat(pcor)
        diag(pcor) <- 1
      }
      # Remove diagonal:
      diag(pcor) <- 0
      # Delta scaling matrix (standard deviations from the covariance matrix):
      Delta <- diag(sqrt(diag(covMat)))
      # Invert the partial-correlation structure back to a precision matrix:
      invSigma <- corpcor::pseudoinverse(Delta %*% cov2cor(corpcor::pseudoinverse(diag(ncol(pcor)) - pcor)) %*% Delta)
      invSigma[abs(invSigma) < tol] <- 0
    }
  }
  # Model-implied variance-covariance matrix:
  if (refit){
    Sigma <- glassoRes$w
  } else {
    Sigma <- corpcor::pseudoinverse(invSigma)
  }
  # Fitmeasures list:
  fitMeasures <- list()
  # Number of variables:
  fitMeasures$nvar <- nVar <- ncol(covMat)
  # Number of observations (unique variances/covariances):
  fitMeasures$nobs <- nVar * (nVar+1) / 2
  # Number of parameters:
  if (missing(nPar)){
    fitMeasures$npar <- sum(invSigma[upper.tri(invSigma,diag=countDiagonalPars)] != 0)
  } else {
    fitMeasures$npar <- nPar
  }
  # Degrees of freedom:
  fitMeasures$df <- fitMeasures$nobs - fitMeasures$npar
  # Minimized ML fit function and chi-square test of exact fit:
  fitMeasures$fmin <- (sum(diag(covMat %*% corpcor::pseudoinverse(Sigma)))- log(det(covMat %*% corpcor::pseudoinverse(Sigma))) - nVar)/2
  fitMeasures$chisq <- 2 * Ncons * fitMeasures$fmin
  fitMeasures$pvalue <- pchisq(fitMeasures$chisq, fitMeasures$df, lower.tail = FALSE)
  # Baseline (independence) model, fitted via glasso with all off-diagonals zero:
  glassoRes_baseline <- suppressWarnings(glasso::glasso(covMat, 0, zero = which(diag(nVar) == 0, arr.ind=TRUE)))
  invSigma_baseline <- glassoRes_baseline$wi
  fitMeasures$baseline.chisq <- Ncons * (sum(diag(covMat %*% invSigma_baseline))- log(det(covMat %*% invSigma_baseline)) - nVar)
  fitMeasures$baseline.df <- fitMeasures$nobs - nVar
  fitMeasures$baseline.pvalue <- pchisq(fitMeasures$baseline.chisq, fitMeasures$baseline.df, lower.tail = FALSE)
  # Incremental Fit Indices
  Tb <- fitMeasures$baseline.chisq
  Tm <- fitMeasures$chisq
  dfb <- fitMeasures$baseline.df
  dfm <- fitMeasures$df
  fitMeasures$nfi <- (Tb - Tm) / Tb
  fitMeasures$tli <- (Tb/dfb - Tm/dfm) / (Tb/dfb - 1)
  fitMeasures$rfi <- (Tb/dfb - Tm/dfm) / (Tb/dfb )
  fitMeasures$ifi <- (Tb - Tm) / (Tb - dfm)
  fitMeasures$rni <- ((Tb- dfb) - (Tm - dfm)) / (Tb - dfb)
  fitMeasures$cfi <- ifelse(dfm > Tm, 1, 1 - (Tm - dfm)/(Tb - dfb))
  # RMSEA
  fitMeasures$rmsea <- sqrt( max(Tm - dfm,0) / (Ncons * dfm))
  # Codes for rmsea confidence interval taken from lavaan:
  lower.lambda <- function(lambda) {
    (pchisq(Tm, df=dfm, ncp=lambda) - 0.95)
  }
  if(is.na(Tm) || is.na(dfm)) {
    fitMeasures$rmsea.ci.lower <- NA
  } else if(dfm < 1 || lower.lambda(0) < 0.0) {
    fitMeasures$rmsea.ci.lower <- 0
  } else {
    if (lower.lambda(0) * lower.lambda(Tm) > 0){
      lambda.l <- NA
    } else {
      lambda.l <- try(uniroot(f=lower.lambda, lower=0, upper=Tm)$root, silent=TRUE)
    }
    # FIX: mirror the try-error guard used for lambda.u below; previously a
    # failed uniroot() propagated a 'try-error' object into sqrt() and errored.
    if(inherits(lambda.l, "try-error")) { lambda.l <- NA }
    fitMeasures$rmsea.ci.lower <- sqrt( lambda.l/(sampleSize*dfm) )
  }
  N.RMSEA <- max(sampleSize, Tm*4)
  upper.lambda <- function(lambda) {
    (pchisq(Tm, df=dfm, ncp=lambda) - 0.05)
  }
  if(is.na(Tm) || is.na(dfm)) {
    fitMeasures$rmsea.ci.upper <- NA
  } else if(dfm < 1 || upper.lambda(N.RMSEA) > 0 || upper.lambda(0) < 0) {
    fitMeasures$rmsea.ci.upper <- 0
  } else {
    if (upper.lambda(0) * upper.lambda(N.RMSEA) > 0){
      lambda.u <- NA
    } else {
      lambda.u <- try(uniroot(f=upper.lambda, lower=0,upper=N.RMSEA)$root, silent=TRUE)
    }
    if(inherits(lambda.u, "try-error")) {lambda.u <- NA }
    fitMeasures$rmsea.ci.upper <- sqrt( lambda.u/(sampleSize*dfm) )
  }
  fitMeasures$rmsea.pvalue <- 1 - pchisq(Tm, df=dfm, ncp=(sampleSize*dfm*0.05^2))
  # RMR / SRMR (root mean square residual, raw and standardized):
  sqrt.d <- 1/sqrt(diag(covMat))
  D <- diag(sqrt.d, ncol=length(sqrt.d))
  R <- D %*% (covMat - Sigma) %*% D
  RR <- (covMat - Sigma)
  e <- nVar*(nVar+1)/2 + nVar
  fitMeasures$rmr <- sqrt( sum(RR[lower.tri(RR, diag=TRUE)]^2) / e )
  fitMeasures$srmr <- sqrt( sum(R[lower.tri(R, diag=TRUE)]^2) / e )
  # Information criteria.
  # Saturated log-likelihood (note: local 'c' shadows base::c below this
  # point; it is only used as a scalar here):
  c <- sampleSize*nVar/2 * log(2 * pi)
  satLL <- ( -c -(sampleSize/2) * log(det(covMat)) - (sampleSize/2)*nVar )
  # Model log-likelihood:
  LL <- -sampleSize * (fitMeasures$fmin - satLL/sampleSize)
  fitMeasures$logl <- LL
  fitMeasures$unrestricted.logl <- satLL
  # Deviance based AIC (traditional definition)
  fitMeasures$aic.ll <- -2*LL + 2* fitMeasures$npar
  # Deviance based AIC with sample size adjustment
  fitMeasures$aic.ll2 <- -2*LL + 2* fitMeasures$npar + (2*fitMeasures$npar^2 + 2*fitMeasures$npar)/(sampleSize - fitMeasures$npar - 1)
  # Chi-square based AIC with df penalty (Kaplan, 2000): AIC(null) - AIC(saturated)
  fitMeasures$aic.x <- Tm - 2*fitMeasures$df
  # Chi-square based AIC with parameter penalty (Kline, 2016) - couldn't find the proper derivation
  fitMeasures$aic.x2 <- Tm + 2*fitMeasures$npar
  BIC <- -2*LL + fitMeasures$npar * log(sampleSize)
  fitMeasures$bic <- BIC
  # add sample-size adjusted bic
  N.star <- (sampleSize + 2) / 24
  BIC2 <- -2*LL + fitMeasures$npar * log(N.star)
  fitMeasures$bic2 <- BIC2
  # Add extended bic:
  fitMeasures$ebic <- -2*LL + fitMeasures$npar * log(sampleSize) + 4 * fitMeasures$npar * ebicTuning * log(nVar)
  fitMeasures$ebicTuning <- ebicTuning
  # Results object:
  Results <- list(
    network = qgraph::wi2net(invSigma),
    invSigma = invSigma,
    fitMeasures = fitMeasures
  )
  class(Results) <- "ggmFit"
  return(Results)
}

# Print method: short usage banner followed by a two-column table of the
# fit measures, formatted via goodNum().
print.ggmFit <- function(x,...){
  name <- deparse(substitute(x))[[1]]
  if (nchar(name) > 10) name <- "object"
  if (name=="x") name <- "object"
  cat("\nggmFit object:\n",
      paste0("Use plot(",name,") to plot the network structure"),
      "\n",
      paste0("Fit measures stored under ",name,"$fitMeasures"),
      "\n\n"
  )
  fit <- data.frame(Measure = names(x$fitMeasures), Value = goodNum(unlist(x$fitMeasures)))
  rownames(fit) <- NULL
  print(fit)
}

# Plot method: draw the estimated network with qgraph.
plot.ggmFit <- function(x,...){
  qgraph::qgraph(x$network,...)
}
qgraph/R/qgraph_growth.R0000644000176200001440000001747414430573263014750 0ustar liggesusers
# init = initial layout in first frame
qgraph.animate <- function(input,ind=NULL,...,constraint=10,growth="order",titles=NULL,sleep=0,smooth = TRUE, plotGraphs = TRUE, progress = TRUE, initLayout)
{
  # arguments <- list(...)
# # if (length(arguments)>0) # { # for (i in 1:length(arguments)) # { # if (class(arguments[[i]])=="qgraph") # { # if (!is.null(names(arguments[[i]]))) # { # for (j in 1:length(arguments[[i]])) # { # if (!(names(arguments[[i]])[j]%in%names(arguments))) # { # arguments[length(arguments)+1]=arguments[[i]][j] # names(arguments)[length(arguments)]=names(arguments[[i]])[j] # } # } # } # } # } # } arguments <- list(...) # Import arguments: if (length(arguments) > 0) arguments <- getArgs(arguments) # Import default arguments: def <- options("qgraph") if (!is.null(def$qgraph)) class(def$qgraph) <- "qgraph" if (any(sapply(def,function(x)!is.null(x)))) { arguments <- getArgs(c(arguments,def)) } # Extract labels: if(is.null(arguments[['labels']])) labels <- NULL else labels <- arguments[['labels']] # Check if list: inputIsList <- is.list(input) # Check for correct input: if (!inputIsList && nrow(input)!=ncol(input)) stop("input must be an inputacency matrix or list of inputacency matrices") if (!(growth %in% c("order","degree"))) stop("Incorrect growth") # Check for dimensions of list: if (inputIsList) { inputList <- input if (any(sapply(input,nrow)-sapply(input,ncol) != 0)) stop("If input is a list, it must contain only square matrices of the same dimesions") } # Number of nodes: if (inputIsList) n <- nrow(input[[1]]) else n <- nrow(input) # Make labels: if (length(labels) == 0) if (is.null(labels)) labels <- 1:n # Convert ind as matrix: if (is.data.frame(ind)) ind <- as.matrix(ind) # Default growth: if (is.null(ind) & inputIsList) { ind <- matrix(TRUE,length(inputList),n) } if (is.null(ind) & !inputIsList) { ind <- matrix(FALSE,n,n) if (growth == "order") { ind <- lower.tri(ind,diag=TRUE) } else if (growth == "degree") { degs <- order((rowSums(input)+colSums(input))/2,decreasing=TRUE) for (i in seq(n)) ind[i, degs[seq(i)]] <- TRUE } } # If ind is logical vector of length n, start with subset and increase normal: if (!inputIsList & is.logical(ind) && length(ind)==n) { sub <- 
ind ind <- matrix(FALSE,n - sum(sub) + 1,n) ind[1,] <- sub if (sum(sub) 3) { CoordSmooth <- matrix(,nrow(Coord),ncol(Coord)) CoordSmooth[,1] <- predict(loess(I(Coord[,1]) ~ I(1:nrow(Coord)))) CoordSmooth[,2] <- predict(loess(I(Coord[,2]) ~ I(1:nrow(Coord)))) } else { CoordSmooth <- Coord } # # plot(Coord[,2]) # lines(CoordSmooth[,2]) # Write back coordinates: for (g in seq_along(GraphsWithNode)) { Graphs[[GraphsWithNode[g]]]$layout[NodeIDs[g],1] <- CoordSmooth[g,1] Graphs[[GraphsWithNode[g]]]$layout[NodeIDs[g],2] <- CoordSmooth[g,2] } ### PRogress bar: if (progress) { setTxtProgressBar(pb, node) } } ### PRogress bar: if (progress) { close(pb) } } # Plot graphs: ### PRogress bar: if (progress) { if (plotGraphs) { message("Plotting Graphs") } else message("Recomputing Graphs") pb <- txtProgressBar(min = 0, max = length(Graphs), title = "Plotting Graphs:", style = 3) } for (graph in seq_along(Graphs)) { Graphs[[graph]] <- qgraph(Graphs[[graph]], DoNotPlot = !plotGraphs, layout = Graphs[[graph]]$layout ) # plot(Graphs[[graph]]) Sys.sleep(sleep) ### PRogress bar: if ( progress) { setTxtProgressBar(pb, graph) } } ### PRogress bar: if ( progress) { close(pb) } # Return graphs: invisible(Graphs) } qgraph/R/clusteringTable.R0000644000176200001440000001174214430573263015223 0ustar liggesusersclusteringTable <- function(..., labels,standardized=TRUE, relative = FALSE, signed = FALSE) { Wmats <- getWmat(list(...)) # if (any(sapply(Wmats,ncol)==1)){ # stop("Not supported for single-node graphs") # } # Check for single node: for (i in seq_along(Wmats)){ if (is.matrix(Wmats[[i]])){ if (ncol(Wmats[[i]]) == 1){ stop("Not supported for single-node graphs") } } else { if (any(sapply(Wmats[[i]],ncol)==1)){ stop("Not supported for single-node graphs") } } } # Check symmetric and remove: for (i in rev(seq_along(Wmats))) { if (is.list(Wmats[[i]])) { for (j in rev(seq_along(Wmats[[i]]))) { if (!isSymmetric(Wmats[[i]][[j]])) { Wmats[[i]] <- Wmats[[i]][-j] warning("Nonsymmetrical graph 
removed.") } } if (length(Wmats[[i]]) == 0) { Wmats <- Wmats[-i] } } else { if (!isSymmetric(Wmats[[i]])) { Wmats <- Wmats[-i] warning("Nonsymmetrical graph removed.") } } } # If no graphs: stop: if (length(Wmats)==0) stop("No symmetrical graphs") # Fix names: names(Wmats) <- fixnames(Wmats,"graph ") # Compute clustering: ClustAuto <- lapply(Wmats, clustcoef_auto) # names(ClustAuto) <- names(Wmats) # Fix tables: for (g in seq_along(ClustAuto)) { if (!is(ClustAuto[[g]],"clustcoef_auto")) { # Set type graph and labels: names(ClustAuto[[g]]) <- fixnames(ClustAuto[[g]],"type ") for (t in seq_along(ClustAuto[[g]])) { # Set labels: if (!missing(labels)) { ClustAuto[[g]][[t]][['node']] <- labels } else if(!is.null(rownames(ClustAuto[[g]][[t]]))) { ClustAuto[[g]][[t]][['node']] <- rownames(ClustAuto[[g]][[t]]) } else ClustAuto[[g]][[t]][['node']] <- paste("Node",seq_len(nrow(ClustAuto[[g]][[t]]))) ClustAuto[[g]][[t]]$graph <- names(ClustAuto)[g] ClustAuto[[g]][[t]]$type <- names(ClustAuto[[g]])[t] } } else { # Set graph: ClustAuto[[g]]$graph <- names(ClustAuto)[g] # Set labels: if (!missing(labels)) { ClustAuto[[g]][['node']] <- labels } else if(!is.null(rownames(ClustAuto[[g]]))) { ClustAuto[[g]][['node']] <- rownames(ClustAuto[[g]]) } else ClustAuto[[g]][['node']] <- paste("Node",seq_len(nrow(ClustAuto[[g]]))) } } # If lists, fix: isList <- sapply(ClustAuto,function(x)!"clustcoef_auto"%in%class(x)) if (any(isList)) { for (l in which(isList)) { ClustAuto <- c(ClustAuto,ClustAuto[[l]]) } ClustAuto <- ClustAuto[-which(isList)] } # Add method and labels to tables and remove signed for (i in seq_along(ClustAuto)) { if (any(grepl("signed_",names(ClustAuto[[i]])))) { ClustAuto[[i]] <- ClustAuto[[i]][,sapply(ClustAuto[[i]],mode)!="numeric"|grepl("signed_",names(ClustAuto[[i]])) == signed] names(ClustAuto[[i]]) <- gsub("signed_","",names(ClustAuto[[i]])) } names(ClustAuto[[i]]) <- gsub("clust","",names(ClustAuto[[i]])) # Relativate: if (relative | standardized) { if (relative & 
standardized) { warning("Using 'relative' and 'standardized' together is not recommended") } for (j in which(sapply(ClustAuto[[i]],mode)=="numeric")) { if (standardized) { ClustAuto[[i]][,j] <- scale2(ClustAuto[[i]][,j]) } if (relative) { mx <- max(abs(ClustAuto[[i]][,j]), na.rm = TRUE) if (mx != 0) { ClustAuto[[i]][,j] <- ClustAuto[[i]][,j] / mx } } # Remove attributes: attributes(ClustAuto[[i]][,j]) <- NULL } } } ## WIDE FORMAT TABLE: WideCent <- rbind.fill(ClustAuto) if (is.null(WideCent$type)) WideCent$type <- NA # LONG FORMAT: LongCent <- reshape2::melt(WideCent, variable.name = "measure", id.var = c("graph","type", "node")) return(LongCent) # # # # # # # # Removed signed, add method and labels to tables: # for (i in seq_along(ClustAuto)) # { # # # # Relativate: # if (relative) # { # for (j in seq_len(ncol(ClustAuto[[i]]))) # { # ClustAuto[[i]][j] <- ClustAuto[[i]][j] / max(abs(ClustAuto[[i]][j]), na.rm = TRUE) # } # } # # ClustAuto[[i]][['graph']] <- names(ClustAuto)[i] # # if (!missing(labels)) # { # ClustAuto[[i]][['node']] <- labels # } else if(!is.null(colnames(Wmats[[i]]))) # { # ClustAuto[[i]][['node']] <- colnames(Wmats[[i]]) # } else ClustAuto[[i]][['node']] <- paste("node",seq_len(nrow(ClustAuto[[i]]))) # # # } # # ## WIDE FORMAT TABLE: # Wide <- rbind.fill(ClustAuto) # # # LONG FORMAT: # Long <- melt(Wide, variable.name = "measure", id.var = c("graph","node")) # # return(Long) }qgraph/R/as_ggraph.R0000644000176200001440000000444314430573263014027 0ustar liggesusers# as.ggraph <- function(object){ # # To graph object using tidygraph: # df <- as.data.frame(object$Edgelist) # # if (any(df$directed)){ # stop("Not yet supported for directed graphs") # } # # tidyG <- tidygraph::tbl_graph(edges = df[,1:2], directed = all(df$directed)) # # edges <- NULL # nodes <- NULL # order <- NULL # n <- NULL # # # Add ID factor: # tidyG <- tidyG %>% tidygraph::activate(edges) %>% dplyr::mutate(id = as.factor(seq_len(dplyr::n()))) # tidyG <- tidyG %>% 
tidygraph::activate(nodes) %>% dplyr::mutate(id = as.factor(seq_len(dplyr::n())), label = object$graphAttributes$Nodes$labels) # # # Put the edges in the right order: # tidyG <- tidyG %>% tidygraph::activate(edges) %>% dplyr::mutate(order = order(object$graphAttributes$Graph$edgesort)) %>% dplyr::arrange(order) # # # Create plot: # ggraph::ggraph(tidyG, layout = object$layout) + # # # Edges: # ggraph::geom_edge_fan(aes( # color = id, # edge_width = id, # edge_linetype = id # ), show.legend = FALSE) + # ggraph::scale_edge_color_manual(values = object$graphAttributes$Edges$color) + # ggraph::scale_edge_width_manual(values = object$graphAttributes$Edges$width/2) + # ggraph::scale_edge_linetype_manual(values = sapply(object$graphAttributes$Edges$lty,switch, # '1' = "solid", '2' = "dashed", '3'= "dotted", '4' = "dotdash", '5' = "longdash", '6' = "twodash" # )) + # # # Nodes: # ggraph::geom_node_point( # aes_string( # size = "id", # colour = "id", # fill = "id" # ), shape = 21, show.legend = FALSE # ) + # ggplot2::scale_size_manual(values = object$graphAttributes$Nodes$width*2) + # ggplot2::scale_colour_manual(values = object$graphAttributes$Nodes$border.color) + # ggplot2::scale_fill_manual(values = object$graphAttributes$Nodes$color) + # # # Labels: # ggraph::geom_node_text( # aes_string( # label = "label" # ) # ) + # # # Limits: # ggplot2::xlim(-1-object$plotOptions$mar[2],1+object$plotOptions$mar[3]) + # ggplot2::ylim(-1-object$plotOptions$mar[1],1+object$plotOptions$mar[3]) + # # # Theme # ggraph::theme_graph(plot_margin=margin(0,0,0,0)) # }qgraph/R/flow.R0000644000176200001440000000732414430573263013044 0ustar liggesusers# flow2 <- function( # object, # qgraph object # from, # Node of origin # horizontal = TRUE, # sizeOrig = 10, # sizeCon = 3, # sizeDiscon = 1, # fadingStyle = c("gradual","split","default","off"), # proportional fading to distance? # maxFade = 0.25, # xScale = 1, # ... 
# Qgraph arguments # ){ # fadingStyle <- match.arg(fadingStyle) # # Test input: # if (!is(object,"qgraph")){ # warning("Input is not a qgraph object, runnin gqgraph") # object <- qgraph(object, ..., DoNotPlot = TRUE) # } # if (length(from)!=1){ # stop("'from' must be of length 1") # } # # # Obtain edgelist: # E <- as.data.frame(object$Edgelist) # # # If not fully connected, stop: # foo <- capture.output(comps <- sna::components(getWmat(object))) # if (comps > 1){ # stop("Disconnected graph is not yet supported.") # } # # # ID all edges: # E$id <- seq_len(nrow(E)) # # # Now we will create new edgelists for every part of the plot. The first will simply connect the target node to its neighbors: # flowE <- list() # curPart <- 1 # # # Enter the first part (relabel later) # part1 <- E[E[,1] == from,c("from","to","weight","id")] # part2 <- E[E[,2] == from,c("to","from","weight","id")] # names(part2) <- c("from","to","weight","id") # flowE[[curPart]] <- rbind(part1,part2) # # # remaining edges: # remainIDs <- E$id[!E$id %in% flowE[[curPart]]$id] # # # While edges remain, add elements # while (length(remainIDs)>0){ # curPart <- curPart + 1 # part1 <- E[E[,1] %in% flowE[[curPart-1]]$to,c("from","to","weight","id")] # part2 <- E[E[,2] %in% flowE[[curPart-1]]$to,c("to","from","weight","id")] # names(part2) <- c("from","to","weight","id") # flowE[[curPart]] <- rbind(part1,part2) # flowE[[curPart]] <- flowE[[curPart]][flowE[[curPart]]$id %in% remainIDs,] # # # remaining edges: # remainIDs <- remainIDs[!remainIDs %in% flowE[[curPart]]$id] # } # # # # Now loop over elements to rename nodes: # nNodes <- object$graphAttributes$Graph$nNodes # allNodes <- seq_len(nNodes) # targetNodes <- allNodes[allNodes!=from] # nTarget <- length(targetNodes) # translateNodes <- NULL # for (c in seq_along(flowE)){ # if (c > 1){ # flowE[[c]]$from <- translateNodes[match(flowE[[c]]$from ,targetNodes)] # } else { # flowE[[c]]$from[] <- 1 # } # translateNodes <- 1 + ((c-1) * nTarget) + seq_len(nTarget) 
# flowE[[c]]$to <- translateNodes[match(flowE[[c]]$to ,targetNodes)]
# }
#
# # Compute the layout:
# L <- matrix(0,1,2)
# for (c in seq_along(flowE)){
# L <- rbind(L,
# cbind(seq(-1,1,length=nTarget),c)
# )
# }
#
#
# # Labels:
# Labels <- object$graphAttributes$Nodes$labels
# newLabs <- c(Labels[from],rep(Labels[-from],length(flowE)))
#
# # Edgelist:
# flowE <- do.call(rbind,flowE)
#
# # Compute sizes:
# allNodes <- 1:translateNodes[length(translateNodes)]
# ConnectedNodes <- unlist(flowE[,1:2])
# vSize <- ifelse(allNodes %in% ConnectedNodes, sizeCon, sizeDiscon)
# vSize[1] <- sizeOrig
#
# # Proportional fading:
# if (fadingStyle == "default"){
# Fade <- TRUE
# } else if (fadingStyle == "off"){
# Fade <- FALSE
# } else if (fadingStyle == "gradual"){
# # Make fading proportional to x location of orin node
# Fade <- 1-(L[flowE[,1],2]/max(L[flowE[,1],2]) * (1-maxFade))
# } else {
# Fade <- ifelse(L[flowE[,1],2]==0,1,maxFade)
# }
#
# # Rescale x:
# L[,2] <- L[,2]^{1/xScale}
#
# # Horizontal:
# if (horizontal){
# L <- L[,2:1]
# }
#
# # Plot:
# qgraph(flowE[,1:3], labels = newLabs, layout = L, directed = FALSE, curve = 0,
# vsize = vSize, object, DoNotPlot = FALSE,fade=Fade,...)
# }
qgraph/R/isColor.R0000644000176200001440000000024514430573263013502 0ustar liggesusers
# For each element of x, report whether grDevices::col2rgb() accepts it as a
# colour specification. Logical values are never considered colours.
isColor <- function(x)
{
  sapply(x, function(value) {
    if (is.logical(value)) {
      FALSE
    } else {
      tryCatch(is.matrix(col2rgb(value)), error = function(e) FALSE)
    }
  })
}
qgraph/R/qgraphAnnotate.R0000644000176200001440000000573414430573263015044 0ustar liggesusers
# Removed due to sendplot being removed from CRAN
#
# Uses sendplot to annotate a qgraph object:
# qgraphAnnotate <- function(
# graph, # graph object from qgraph
# ..., # Named vectors indicating elements of the tooltip
# fromqgraph = c("labels","nodeNames","tooltips","groups"), # Vector indicating which info should be extracted from qgraph object and plotted.
# filename = "qgraph", # image.size = "600x600", # window.size = image.size, # legend = FALSE # Overwries legend plotting # ) # { # if(!requireNamespace("sendplot")) stop("'sendplot' package needs to be installed.") # # # List containing the labels: # TooltipContents <- list(...) # # # Extract info from qgraph: # if ("labels" %in% fromqgraph && !is.null(graph$graphAttributes$Nodes$labels) && !is.logical(graph$graphAttributes$Nodes$labels)) # { # TooltipContents$Label <- graph$graphAttributes$Nodes$labels # } # # if ("nodeNames" %in% fromqgraph && !is.null(graph$graphAttributes$Nodes$names) && !is.logical(graph$graphAttributes$Nodes$names)) # { # TooltipContents$Name <- graph$graphAttributes$Nodes$names # } # # if ("tooltips" %in% fromqgraph && !is.null(graph$graphAttributes$Nodes$tooltips) && !is.logical(graph$graphAttributes$Nodes$tooltips)) # { # TooltipContents$Tooltip <- graph$graphAttributes$Nodes$tooltips # } # # if ("groups" %in% fromqgraph && !is.null(graph$graphAttributes$Graph$groups) && length(graph$graphAttributes$Graph$groups) > 1) # { # gr <- graph$graphAttributes$Graph$groups # if (is.null(names(gr))) names(gr) <- paste("Group",seq_along(gr)) # # TooltipContents$Group <- sapply(seq_len(graph$graphAttributes$Graph$nNodes), function(n) paste(names(gr)[sapply(gr,function(g)n%in%g)], collapse = "; ")) # } # # TooltipContents <- as.data.frame(TooltipContents) # # # Fix for legend: # graph$plotOptions$legend <- legend # # # Create plot: # # xy.send(paste0("qgraph:::plot.qgraph(",dput(graph),")"), # save(graph, file = tempfile(fileext = ".RData") -> gObj) # if (grepl("(windows)|(ming)",R.Version()$os,ignore.case=TRUE)){ # gObj <- gsub("\\\\","\\\\\\\\",gObj) # } # # if (NROW(TooltipContents) > 0) # { # sendplot::xy.send(paste0("load('",gObj,"');qgraph:::plot.qgraph(graph)"), # x.pos = graph$layout[,1], # y.pos = graph$layout[,2], # xy.labels = TooltipContents, # fname.root = filename, # dir = paste0(getwd(),"/"), # image.size = image.size, # window.size = 
window.size) # } else { # sendplot::xy.send(paste0("load('",gObj,"');qgraph:::plot.qgraph(graph)"), # x.pos = -100, # y.pos = -100, # xy.labels = data.frame(` ` = ''), # fname.root = filename, # dir = paste0(getwd(),"/"), # image.size = image.size, # window.size = window.size) # } # # xy.send("plot.qgraph(graph)", # # # return(paste0(filename,".html")) # }qgraph/R/mapusr2in.R0000644000176200001440000000253614430573263014015 0ustar liggesusers# Map user space to inches space: usr2inX <- function(x) { usr <- par("usr") pin <- par("pin") (x-usr[1])/(usr[2]-usr[1]) * pin[1] } usr2inY <- function(x) { usr <- par("usr") pin <- par("pin") (x-usr[3])/(usr[4]-usr[3]) * pin[2] } # Same but about origin (for atan2): usr2inX2 <- function(x) { usr <- par("usr") pin <- par("pin") x/(usr[2]-usr[1]) * pin[1] } usr2inY2 <- function(x) { usr <- par("usr") pin <- par("pin") x/(usr[4]-usr[3]) * pin[2] } atan2usr2in <- function(x,y) atan2(usr2inX2(x),usr2inY2(y))%%(2*pi) # Map inches space to user space: in2usrX <- function(x) { usr <- par("usr") pin <- par("pin") usr[1] + x/pin[1] * (usr[2] - usr[1]) } in2usrY <- function(x) { usr <- par("usr") pin <- par("pin") usr[3] + x/pin[2] * (usr[4] - usr[3]) } ## Find perpundicular poin to quantile of line: PerpMid <- function(xy0,xy1,ang=1,cex=1,q=0.5) { # Change xy0 to quantile: xy0 <- xy0 + q * (xy1 - xy0) # Fixed inches size: cexIn <- cex * 0.025 * sqrt(sum(par("pin")^2)) # Rotate about origin: xyr <- matrix(c(0,ang,-ang,0),2,2) %*% (c(usr2inX(xy1[1]),usr2inY(xy1[2])) - c(usr2inX(xy0[1]),usr2inY(xy0[2]))) # Rescale: xyr <- xyr * cexIn/sqrt(sum(xyr^2)) # Add origin: xyr <- c(usr2inX(xy0[1]),usr2inY(xy0[2])) + xyr # Map to usr and return: return(c(in2usrX(xyr[1]),in2usrY(xyr[2]))) } qgraph/R/qgraph.rack.R0000644000176200001440000001007514430573263014273 0ustar liggesusers## Merges graph2 into graph1 at one node only. 
# qgraph.rack: splice a second qgraph object (graph2) into a first (graph1),
# identifying one node of graph2 with one node of graph1, and rescale/rotate
# graph2's layout around the shared node. Returns (invisibly when plotted)
# the merged qgraph object.
qgraph.rack <- function(
  graph1, # Original graph
  graph2, # Graph to include in original graph
  link = c(1,1), # integer vector of two indicating which node links the graphs, first element is node number in node 1 and second element the node number in node 2
  scale = 0.5, # Scale of graph 2 relative to graph 1
  rotation = 0, # rotation of graph 2, in radian
  plot = TRUE # Plot the results?
)
{
  # Numbers of nodes:
  n1 <- graph1$graphAttributes$Graph$nNodes
  n2 <- graph2$graphAttributes$Graph$nNodes
  # Number of edges:
  e1 <- length(graph1$Edgelist$from)
  e2 <- length(graph2$Edgelist$from)
  # New IDs in graph 2: the linked node keeps graph1's ID link[1]; all other
  # graph2 nodes are appended after graph1's nodes in their original order.
  oldID <- 1:n2
  newNodes <- oldID[oldID != link[2]]
  newID <- integer(n2)
  newID[link[2]] <- link[1]
  newID[-link[2]] <- n1 + seq_len(n2-1)
  ### Update graph 2 and add to 1 ###
  # Edgelist: translate graph2 endpoints to the merged IDs and append.
  graph2$Edgelist$from <- newID[graph2$Edgelist$from]
  graph1$Edgelist$from <- c(graph1$Edgelist$from, graph2$Edgelist$from)
  graph2$Edgelist$to <- newID[graph2$Edgelist$to]
  graph1$Edgelist$to <- c(graph1$Edgelist$to, graph2$Edgelist$to)
  graph1$Edgelist$weight <- c(graph1$Edgelist$weight, graph2$Edgelist$weight)
  graph1$Edgelist$directed <- c(graph1$Edgelist$directed, graph2$Edgelist$directed)
  graph1$Edgelist$bidirectional <- c(graph1$Edgelist$bidirectional, graph2$Edgelist$bidirectional)
  # Arguments: merge any user argument whose length matches the node count
  # (per-node argument) or the edge count (per-edge argument) in BOTH graphs;
  # other arguments are left as graph1's.
  # Nodes:
  for (a in unique(c(names(graph1$Arguments), names(graph2$Arguments))))
  {
    if (!is.null(graph1$Arguments[[a]]) & !is.null(graph2$Arguments[[a]]))
    {
      # Length of nodes:
      if (length(graph1$Arguments[[a]]) == n1 & length(graph2$Arguments[[a]]) == n2)
      {
        graph1$Arguments[[a]] <- c(graph1$Arguments[[a]], graph2$Arguments[[a]][newNodes] )
      }
      # Length of edges:
      if (length(graph1$Arguments[[a]]) == e1 & length(graph2$Arguments[[a]]) == e2)
      {
        graph1$Arguments[[a]] <- c(graph1$Arguments[[a]], graph2$Arguments[[a]])
      }
    }
  }
  # graphAttributes: same per-node / per-edge length-matching merge.
  # Nodes:
  for (a in seq_along(graph1$graphAttributes$Nodes))
  {
    if (length(graph1$graphAttributes$Nodes[[a]]) == n1 & length(graph2$graphAttributes$Nodes[[a]]) == n2)
    {
      graph1$graphAttributes$Nodes[[a]] <- c(graph1$graphAttributes$Nodes[[a]], graph2$graphAttributes$Nodes[[a]][newNodes] )
    }
  }
  # Edges:
  for (a in seq_along(graph1$graphAttributes$Edges))
  {
    if (length(graph1$graphAttributes$Edges[[a]]) == e1 & length(graph2$graphAttributes$Edges[[a]]) == e2)
    {
      graph1$graphAttributes$Edges[[a]] <- c(graph1$graphAttributes$Edges[[a]], graph2$graphAttributes$Edges[[a]])
    }
  }
  # Knots: shift graph2's knot IDs past graph1's maximum so they stay unique.
  graph2$graphAttributes$Knots$knots[graph2$graphAttributes$Knots$knots > 0] <- graph2$graphAttributes$Knots$knots[graph2$graphAttributes$Knots$knots > 0] + max(graph1$graphAttributes$Knots$knots)
  graph1$graphAttributes$Knots$knots <- c(graph1$graphAttributes$Knots$knots, graph2$graphAttributes$Knots$knots)
  # Graph: one node is shared, hence n1 + n2 - 1; edge drawing order is
  # recomputed from the merged absolute weights.
  graph1$graphAttributes$Graph$nNodes <- n1 + n2 - 1
  graph1$graphAttributes$Graph$edgesort <- sort(abs(graph1$Edgelist$weight),index.return=TRUE)$ix
  # Translate graph2's group memberships to the merged node IDs:
  for (g in seq_along(graph2$graphAttributes$Graph$groups))
  {
    graph2$graphAttributes$Graph$groups[[g]] <- newID[graph2$graphAttributes$Graph$groups[[g]]]
  }
  graph1$graphAttributes$Graph$groups <- c(graph1$graphAttributes$Graph$groups, graph2$graphAttributes$Graph$groups)
  graph1$graphAttributes$Graph$color <- c(graph1$graphAttributes$Graph$color, graph2$graphAttributes$Graph$color)
  ### ROTATE AND SCALE LAYOUT ###
  L1 <- graph1$layout
  L2 <- graph2$layout
  # Center layout 2 on the linking node:
  L2[,1] <- L2[,1] - L2[link[2],1]
  L2[,2] <- L2[,2] - L2[link[2],2]
  # Scale:
  L2 <- L2 * scale
  # Rotate (clockwise for positive `rotation`):
  RotMat <- function(d) matrix(c(cos(-d),sin(-d),-sin(-d),cos(-d)),2,2)
  L2 <- t(RotMat( rotation ) %*% t(L2))
  # Translate so the linking node coincides with its counterpart in L1:
  L2[,1] <- L2[,1] + L1[link[1],1]
  L2[,2] <- L2[,2] + L1[link[1],2]
  # Combine (the shared node keeps graph1's position):
  graph1$layout <- rbind(L1, L2[newNodes,])
  if (plot)
  {
    plot(graph1)
    invisible(graph1)
  } else {
    return(graph1)
  }
}
qgraph/R/sign0.R0000644000176200001440000000005314430573263013105 0ustar liggesusers
# Collapse sign() to {-1, 1}: zero (and NA-safe non-negatives) map to +1.
sign0 <- function(x) ifelse(sign(x)<0,-1,1)
qgraph/R/layout.R0000644000176200001440000000561214430573263013410 0ustar
# Fruchterman-Reingold force-directed layout, computed by the package's C++
# routine (qgraph_layout_Cpp).
#
# Arguments:
#   edgelist      - two-column matrix of (from, to) node indices (1-based)
#   weights       - edge weights; NULL = all 1
#   vcount        - number of nodes; NULL = inferred from the edgelist
#   niter         - number of iterations (default 500)
#   max.delta     - maximum displacement per node (scalar or per-node vector)
#   area          - layout area (default n^2)
#   cool.exp      - cooling exponent (default 1.5)
#   repulse.rad   - repulsion radius (default area * n)
#   init          - n x 2 matrix of initial positions; NULL = circle layout
#   groups, rotation, layout.control - accepted for backward compatibility;
#                   not used by this function
#   constraints   - n x 2 matrix; non-NA entries fix a node's x/y coordinate
#   round, digits - round weights and coordinates to 'digits' decimals
#
# Returns an n x 2 matrix of node coordinates.
qgraph.layout.fruchtermanreingold <- function(edgelist,weights=NULL,vcount=NULL,niter=NULL,max.delta=NULL,area=NULL,cool.exp=NULL,repulse.rad=NULL,init=NULL,groups=NULL,rotation=NULL,layout.control=0.5,constraints=NULL,round = TRUE, digits = NULL)
{
  # 0-based endpoint vectors for the C++ routine:
  Ef <- as.integer(edgelist[,1] - 1)
  Et <- as.integer(edgelist[,2] - 1)

  # Provide default settings:
  ecount <- nrow(edgelist)
  if (is.null(digits)) digits <- 5
  if (!is.null(vcount)) n <- vcount else n <- max(length(unique(c(edgelist))), max(edgelist))
  if (is.null(weights)) weights <- rep(1, ecount)
  if (is.null(niter)) niter <- 500
  if (is.null(max.delta)) max.delta <- n
  if (length(max.delta) == 1) max.delta <- rep(max.delta, n)
  if (is.null(area)) area <- n^2
  if (is.null(cool.exp)) cool.exp <- 1.5
  if (is.null(repulse.rad)) repulse.rad <- area * n

  if (is.null(init))
  {
    # Set initial positions equally spaced on a circle:
    init <- matrix(0, nrow = n, ncol = 2)
    tl <- n + 1
    init[,1] <- sin(seq(0, 2*pi, length = tl))[-tl]
    init[,2] <- cos(seq(0, 2*pi, length = tl))[-tl]
  }
  if (any(duplicated(init)))
  {
    # Coinciding start positions make the repulsion force degenerate;
    # perturb duplicates by a tiny random amount:
    init[duplicated(init),] <- init[duplicated(init),] +
      rnorm(prod(dim(init[duplicated(init),,drop=FALSE])), 0, 1e-10)
    # Fixed typos in the original message ("Duplciated", "pertubated"):
    warning("Duplicated initial placement found. Initial placement slightly perturbed.")
  }
  x <- init[,1]
  y <- init[,2]

  # Constraints: fix coordinates where the constraints matrix is non-NA.
  # BUG FIX: the original built rep(FALSE, vcount), which is logical(0) when
  # vcount is NULL (its default) — the computed node count 'n' is the correct
  # length. The constraint assignment is also only performed when constraints
  # are actually supplied, avoiding subsetting a NULL matrix.
  if (is.null(constraints))
  {
    Cx <- Cy <- rep(FALSE, n)
  } else {
    Cx <- !is.na(constraints[,1])
    Cy <- !is.na(constraints[,2])
    x[Cx] <- constraints[Cx,1]
    y[Cy] <- constraints[Cy,2]
  }

  # Round, so results are reproducible across platforms:
  if (round)
  {
    weights <- round(weights, digits)
    x <- round(x, digits)
    y <- round(y, digits)
  }

  # Perform the layout calculation and return the n x 2 coordinate matrix.
  # (The legacy "version 1" C routine was already unreachable — 'version'
  # was hard-coded to 2 — so the dead branch has been removed.)
  qgraph_layout_Cpp(
    pniter = as.integer(niter),
    pvcount = as.integer(n),
    pecount = as.integer(ecount),
    maxdelta = max.delta,
    parea = as.double(area),
    pcoolexp = as.double(cool.exp),
    prepulserad = as.double(repulse.rad),
    Ef = Ef,
    Et = Et,
    W = abs(weights),
    xInit = as.double(x),
    yInit = as.double(y),
    Cx = as.logical(Cx),
    Cy = as.logical(Cy),
    as.integer(digits))
}
print.qgraph <- function(x, ...)
{
  el <- x$Edgelist
  # Edge symbol: "-->" for directed, "<->" for bidirectional, "---" otherwise:
  arrow <- ifelse(el$directed, ifelse(el$bidir, "<->", "-->"), "---")
  rows <- cbind(el$from, "\t", arrow, "\t", el$to, "\t", round(el$weight, 2), "\n")
  cat(c("From\t\tTo\tWeight\n"))
  # One cat() call per edge row:
  for (i in seq_len(nrow(rows))) cat(rows[i, ])
}

# plot.qgraph <- function(x,...) qgraph(x,...)

summary.qgraph <- function(object, ...)
{
  # Print simple counts describing the graph's edge list:
  el <- object$Edgelist
  cat("Number of edges:\t", length(el$from), "\n",
      "Number of directed edges:\t", sum(el$directed), "\n",
      "Number of unique weights:\t", length(unique(el$weight)), "\n\n")
}

library("Matrix")

# Computes the weights matrix of the object, dispatching on its class:
getWmat <- function(x, ...)
{
  UseMethod("getWmat")
}

# List method: apply the appropriate getWmat method to every element and
# return the list of weights matrices:
getWmat.list <- function(x, ...)
{
  lapply(x, getWmat, ...)
}
# Internal helper shared by getWmat.matrix and getWmat.data.frame (the two
# methods previously duplicated this logic verbatim). Turns an edgelist with
# numeric node indices (2 columns: unweighted, 3 columns: weighted) into an
# nNodes x nNodes weights matrix. Undirected edges (directed == FALSE, may be
# a per-edge vector) are added in both directions. 'labels' may be NULL.
edgeList2Wmat <- function(x, nNodes, labels, directed)
{
  from <- c(x[,1], x[!directed,2])
  to   <- c(x[,2], x[!directed,1])
  if (ncol(x) == 2)
  {
    # Unweighted edgelist: all weights 1.
    mat <- as.matrix(1*sparseMatrix(from, to, dims = c(nNodes, nNodes)))
  } else {
    w <- c(x[,3], x[!directed,3])
    mat <- as.matrix(1*sparseMatrix(from, to, x = w, dims = c(nNodes, nNodes)))
  }
  if (!is.null(labels)) rownames(mat) <- colnames(mat) <- labels
  mat
}

# Matrix method: a square matrix is treated as an already-computed weights
# matrix (only relabeled); a 2- or 3-column matrix is treated as an edgelist.
getWmat.matrix <- function(x,nNodes,labels, directed = TRUE,...)
{
  if (mode(x)!="numeric") stop("Input matrix must be numeric")

  # Weights matrix:
  if (length(unique(dim(x))) == 1)
  {
    if (missing(labels))
    {
      if (!is.null(colnames(x)))
      {
        labels <- colnames(x)
      }
    }
    if (!missing(labels))
    {
      if (!all(length(labels)==dim(x))) stop("Length labels must match dimensions of Weights matrix")
      colnames(x) <- rownames(x) <- labels
    }
    return(x)
  }

  if (!ncol(x) %in% c(2,3))
  {
    stop("Edgelist must have either 2 or 3 columns")
  }

  # From here on, treat missing labels as NULL so the shared helper can be used:
  if (missing(labels)) labels <- NULL
  if (missing(nNodes))
  {
    if (!is.null(labels)) nNodes <- length(labels) else nNodes <- max(x[,1:2])
  }
  if (!is.null(labels) && length(labels) != nNodes) stop("Length of labels must match number of nodes")

  edgeList2Wmat(x, nNodes, labels, directed)
}

# Data frame method: an edgelist whose from/to columns may be node names
# (character/factor); names are mapped to indices via 'labels'.
getWmat.data.frame <- function(x,nNodes,labels,directed=TRUE,...)
{
  if (!ncol(x) %in% c(2,3))
  {
    stop("Edgelist must have either 2 or 3 columns")
  }
  if (ncol(x) == 3 && !is.numeric(x[,3])) stop("Third column is not numeric")

  # Replace labels with node indices:
  if (is.factor(x[,1])) x[,1] <- as.character(x[,1])
  if (is.factor(x[,2])) x[,2] <- as.character(x[,2])
  if (is.character(x[,1]) & !is.character(x[,2]) | !is.character(x[,1]) & is.character(x[,2])) stop("Both from and to columns must be either numeric or character")

  if (missing(labels)) labels <- NULL
  if (is.character(x[,1]) & is.character(x[,2]))
  {
    if (is.null(labels))
    {
      labels <- unique(c(x[,1], x[,2]))
    }
    if (any(!c(x[,1],x[,2]) %in% labels)) stop("labels does not contain all node names.")
    x[,1] <- match(x[,1], labels)
    x[,2] <- match(x[,2], labels)
  }

  if (missing(nNodes))
  {
    if (!is.null(labels)) nNodes <- length(labels) else nNodes <- max(x[,1:2])
  }
  if (!is.null(labels) && length(labels) != nNodes) stop("Length of labels must match number of nodes")

  edgeList2Wmat(x, nNodes, labels, directed)
}
# qgraph method: rebuild the weights matrix from a qgraph object's edge list.
# Undirected and bidirectional edges are expanded to both directions; an
# optional 'directed' argument overrides the stored per-edge directedness.
getWmat.qgraph <- function(x, directed,...)
{
  # Unweighted graphs: collapse all nonzero weights to 1:
  if (!is.null(x[['graphAttributes']][['Graph']][['weighted']])) if (!x[['graphAttributes']][['Graph']][['weighted']]) x[['Edgelist']][['weight']] <- ifelse(x[['Edgelist']][['weight']]==0,0,1)
  E <- x[['Edgelist']]
  n <- x[['graphAttributes']][['Graph']][['nNodes']]
  if (!missing(directed)) E$directed <- directed
  # Add the reversed direction for undirected and bidirectional edges
  # ($bidir partially matches the stored $bidirectional element):
  from <- c(E$from, E$to[!E$directed | E$bidir])
  to <- c(E$to , E$from[!E$directed | E$bidir])
  w <- c(E$weight, E$weight[!E$directed | E$bidir])
  df <- data.frame( from=from, to=to, w=w )
  # Drop duplicated (from, to, weight) triples introduced by the expansion:
  df <- df[!duplicated(df),]
  mat <- as.matrix(1*sparseMatrix(df$from, df$to,x= df$w, dims = c(n,n)))
  rownames(mat) <- colnames(mat) <- x$graphAttributes$Nodes$labels
  return(mat)
}

# bootnet method: the result object stores its weights matrix directly; strip
# weights and/or signs according to the object's own flags:
getWmat.bootnetResult <- function(x, ...){
  wMat <- x$graph
  if (!isTRUE(x$weighted)){
    wMat <- sign(wMat)
  }
  if (!isTRUE(x$signed)){
    wMat <- abs(wMat)
  }
  return(wMat)
}

## Function to mix color vector x with weight w: returns a single color that
## is the weighted mean of the input colors in RGB space.
mixCols <- function(x,w)
{
  # x = vector of colors
  # w = weights (recycled to length(x) if scalar; equal weights if missing)
  if (missing(w)) w <- rep(1,length(x))
  if (length(w)==1) w <- rep(w,length(x))
  RGB <- col2rgb(x)
  wMeans <- apply(RGB,1,weighted.mean,w=w)
  return(rgb(wMeans[1],wMeans[2],wMeans[3],maxColorValue=255))
}

# Reference clustering coefficient of a random graph with the same density as
# x (p = observed edge density; expected clustering is p*(N-1)/N):
Cr <- function(x){
  if ("igraph"%in%class(x)) x <- get.adjacency(x)
  N=nrow(x)
  p=sum(x/2)/sum(lower.tri(x))
  t=(p*(N-1)/N)
  t
}

# Reference average shortest path length of a random graph with the same
# density as x. NOTE(review): the formula appears to be the standard
# random-graph approximation (log(N) - gamma)/log(p*(N-1)) + 0.5 with gamma
# Euler's constant — source not stated in this file.
APLr <- function(x){
  if ("igraph"%in%class(x)) x <- get.adjacency(x)
  N=nrow(x)
  p=sum(x/2)/sum(lower.tri(x))
  eulers_constant <- .57721566490153
  l = (log(N)-eulers_constant)/log(p*(N-1)) +.5
  l
}

# Small-world index: ratio of normalized clustering to normalized average
# path length. Only defined for unweighted graphs; weights are dropped with
# a warning. Returns a list with the raw and random-reference quantities and
# the index itself.
smallworldIndex <- function(x){
  if ("qgraph"%in%class(x)) x <- as.igraph(x)
  if (!all(E(x)$weight==1))
  {
    warning("Edge weights removed")
    E(x)$weight[] <- 1
  }
  list(
    transitivity = igraph::transitivity(x),
    transitivity_random = Cr(x),
    APL = igraph::average.path.length(x),
    APL_random = APLr(x),
    index = (igraph::transitivity(x) / Cr(x)) / (igraph::average.path.length(x) / APLr(x))
  )
}

# Deprecated GUI: the rpanel-based implementation (previously ~400 lines of
# commented-out code here) has been superseded by the Shiny app; the function
# now only raises an informative error.
qgraph.gui <- function(input,corMat,...)
{
  stop("This function has been removed in favor of our Shiny app: https://jolandakos.shinyapps.io/NetworkApp/")
}
# Compute the xspline coordinates of a self-loop (edge from a node to itself)
# attached to the node at (x, y), pointing in direction 'rotation'. For
# residual = TRUE a short straight "residual arrow" stub is returned instead
# of a loop. cex/cex2 are the node's width/height expansion, 'shape' the node
# shape; polygonList supplies vertex polygons for the non-standard shapes.
# Relies on Cent2Edge() (defined elsewhere in the package) to find points on
# the node's border.
SelfLoop <- function(x,y,rotation=0,cex,cex2,shape,residual=FALSE,resScale=1, polygonList = polygonList, offset = 0)
{
  if (missing(polygonList))
  {
    polygonList = list(
      ellipse = ELLIPSEPOLY,
      heart = HEARTPOLY,
      star = STARPOLY
    )
  }

  # If shape is rectangle, compute the loop for a square using the smallest
  # cex, then shift it to the rectangle's border:
  if (shape == "rectangle")
  {
    loop <- SelfLoop(x,y,rotation,min(cex,cex2),min(cex,cex2),"square",residual,resScale)
    xOff <- (Cent2Edge(x,y,pi/2,cex,cex2,shape, offset=offset,polygonList = polygonList)[1] - x)
    yOff <- (Cent2Edge(x,y,0,cex,cex2,shape, offset=offset,polygonList = polygonList)[2] - y)
    SmallX <- (Cent2Edge(x,y,pi/2,min(cex,cex2),min(cex,cex2),"square", offset=offset,polygonList = polygonList)[1] - x)
    SmallY <- (Cent2Edge(x,y,0,min(cex,cex2),min(cex,cex2),"square", offset=offset,polygonList = polygonList)[2] - y)
    # Move up or down (node is taller than wide):
    if (cex2 > cex)
    {
      if (any(loop$x[c(1,length(loop$x))] > x-xOff & loop$x[c(1,length(loop$x))] < x+xOff))
      {
        if (cos(rotation) > 0)
        {
          # Move up:
          loop$y <- loop$y + yOff - SmallY
        } else {
          # Move down:
          loop$y <- loop$y - yOff + SmallY
        }
      }
    } else {
      # Node is wider than tall; move left or right instead:
      if (any(loop$y[c(1,length(loop$y))] > y-yOff & loop$y[c(1,length(loop$y))] < y+yOff))
      {
        if (sin(rotation) > 0)
        {
          # Move right:
          loop$x <- loop$x + xOff - SmallX
        } else {
          # Move left:
          loop$x <- loop$x - xOff + SmallX
        }
      }
    }
    return(loop)
  }

  # Half-angle between the two points where the loop leaves/enters the node:
  loopAngle <- pi/8
  if (!residual)
  {
    # Loop center: 1.5 times the center-to-border distance in the loop direction:
    Cent <- Cent2Edge(x,y,rotation,cex,cex2,shape,offset=offset,polygonList = polygonList)
    Cent[1] <- x + 1.5*(Cent[1]-x)
    Cent[2] <- y + 1.5*(Cent[2]-y)
    # Attachment points on the node border, loopAngle to either side:
    LoopPointsRight <- Cent2Edge(x,y,loopAngle + rotation,cex,cex2,shape, offset=offset,polygonList = polygonList)
    LoopPointsLeft <- Cent2Edge(x,y,(-1*loopAngle + rotation),cex,cex2,shape, offset=offset,polygonList = polygonList)
    # Four control points on a small circle around the loop center:
    Circ <- lapply(seq(1.5*pi+ rotation,2.5*pi + rotation,length=4),Cent2Edge,x=Cent[1],y=Cent[2],cex=0.8*min(cex,cex2),cex2=0.8*min(cex,cex2),shape="circle", polygonList = polygonList)
    CircX <- sapply(Circ,'[',1)
    CircY <- sapply(Circ,'[',2)
    CircX <- c(LoopPointsLeft[1],CircX,LoopPointsRight[1])
    CircY <- c(LoopPointsLeft[2],CircY,LoopPointsRight[2])
    # Smooth the control points into a closed-looking spline (not drawn here):
    spl <- xspline(CircX,CircY,1,draw=FALSE)
    return(spl)
  } else {
    # Residual arrow: straight stub from offset resScale to 'offset':
    Start <- Cent2Edge(x,y,rotation,cex,cex2,shape,offset=resScale ,polygonList = polygonList)
    End <- Cent2Edge(x,y,rotation,cex,cex2,shape,offset=offset,polygonList = polygonList)
    spl <- xspline(c(Start[1],End[1]),c(Start[2],End[2]),1,draw=FALSE)
    return(spl)
  }
}

# Recursively collect plotting arguments from a list that may contain qgraph
# objects: each qgraph object contributes its stored 'Arguments' and 'layout',
# with earlier (non-qgraph) arguments taking precedence over inherited ones.
getArgs <- function(args)
{
  if (length(args)>0)
  {
    isqgraph <- sapply(args,function(x)"qgraph"%in%class(x))
    argLists <- c(lapply(args[isqgraph],'[[','Arguments'),lapply(args[isqgraph],'[','layout'))
    args <- args[!isqgraph]
    newArgs <- lapply(argLists,getArgs)
    # Only add inherited arguments that are not already set:
    for (l in newArgs) args <- c(args,l[!names(l)%in%names(args)])
  }
  return(args)
}

# Copied from base R's pie() with some changes: draws a two-segment ring
# ("pie donut") showing proportion x (in [0,1]) around a node-colored disc,
# with 'label' printed at the center. Used for pie-chart node borders.
pie2 <- function (x, label = "", radius = 0.8, pie.bord=.1, pie.col='white', pie.col2 = 'grey', bg = 'white', border.width = 1)
{
  # Two segments: the remainder (1-x) and the proportion x itself:
  x <- c(1-x, x)
  # aux functions: map pie fractions t to circle coordinates at 'radius':
  t2xy <- function(t, radius) {
    t2p <- twopi * t + init.angle * pi/180
    list(x = radius * cos(t2p), y = radius * sin(t2p))
  }
  # fixed quantities (inherited from base pie(), mostly left at defaults):
  init.angle <- 90
  edges = 200
  angle <- 45
  density = c(NULL, NULL)
  lty = c(NULL, NULL)
  clockwise = FALSE
  col = c(pie.col2, pie.col)
  border = c(TRUE, TRUE)
  # Inner radius of the ring:
  radius2 <- radius - radius*pie.bord
  n <- 200
  # compute some aux variables:
  x <- c(0, cumsum(x)/sum(x))
  dx <- diff(x)
  nx <- length(dx)
  twopi <- 2 * pi
  # set up plotting area:
  plot.new()
  pin <- par("pin")
  xlim <- ylim <- c(-1, 1)
  plot.window(xlim, ylim, "", asp = 1)
  par("fg")
  # plot pie chart:
  for (i in 1L:nx) {
    P <- t2xy(seq.int(x[i], x[i + 1], length.out = n), radius)
    polygon(c(P$x, 0), c(P$y, 0), density = density[i], angle = angle[i], border = border[i], col = col[i], lty = lty[i], lwd = border.width)
  }
  border2 <- TRUE
  # plot node disc on top, leaving only the outer ring of the pie visible:
  P2 <- t2xy(seq.int(x[1], x[3], length.out = n), radius2)
  polygon(c(P2$x, 0), c(P2$y, 0), density = NULL, angle = 45, border=TRUE, col=bg, lty=NULL, lwd = border.width)
  P3 <- t2xy(seq.int(x[1], x[3], length.out = n), radius2-radius2*.001)
  polygon(c(P2$x, 0), c(P2$y, 0), density = NULL, angle = 45, border=FALSE, col=bg, lty=NULL, lwd = border.width)
  # node label at the center:
  text(0,0,label)
}
# Darken each color in x by mixing fraction 'dark' of black into it:
darken <- function(x, dark = 0.25){
  sapply(x,function(xx){
    col <- c(col2rgb(xx))/255
    col <- (1-dark)*col
    rgb(col[1],col[2],col[3])
  })
}

# Draw a single node at (x, y) on the current device. Handles the standard
# shapes ("circle", "square", "triangle", "diamond", "rectangle") and any
# custom shape in polygonList, optional pie-chart rings, bar indicators and
# mean/SD indicators. Relies on Cent2Edge() and IntInNode() defined elsewhere
# in the package.
drawNode <- function(x, y, shape, cex1, cex2, border, vcolor, bcolor, border.width, polygonList, bars, barSide, barColor, barLength, barsAtSide, font = 1, usePCH = TRUE, resolution = 100, noPar = FALSE, bw = FALSE, density = NULL, angle = NULL, mean, SD, meanRange, pie, pieColor = NA, pieColor2 = "white", pieBorder = 0.15, pieStart = 0, pieDarken = 0.25, pastel = FALSE,rainbowStart=0,equalPieColor=FALSE)
{
  if (!is.null(pie) && !shape %in% c("circle", "square",names(polygonList))){
    stop("Pie charts only supported for shape = 'circle' or shape = 'square'")
  }
  if (shape %in% c("circle","square","triangle","diamond"))
  {
    # Fast path: draw via base plotting symbols (pch) when possible
    # (triangle/diamond have no polygon fallback, so always use pch there):
    if (is.null(pie) && (usePCH | shape %in% c("triangle","diamond")))
    {
      if (shape=="circle")
      {
        pch1=16
        pch2=1
      }
      if (shape=="square")
      {
        pch1=15
        pch2=0
      }
      if (shape=="triangle")
      {
        pch1=17
        pch2=2
      }
      if (shape=="diamond")
      {
        pch1=18
        pch2=5
      }
      # (The stray empty argument below matches 'type' and is harmless.)
      points(x, y, ,cex=cex1,col=vcolor,lwd=border.width,pch=pch1)
      if (border)
      {
        # Open symbol of the same shape drawn on top as the border:
        points(x, y, ,cex=cex1,col=bcolor,lwd=border.width,pch=pch2)
      }
    } else {
      if (is.null(pie) && shape == "square")
      {
        xOff <- Cent2Edge(x,y,pi/2,cex1,cex1,shape, noPar = noPar)[1] - x
        yOff <- Cent2Edge(x,y,0,cex1,cex1,shape, noPar = noPar)[2] - y
        if(bw) # Plot a white background behind each node to avoid transparency due to density in the next line
        {
          rect(x-xOff,y-yOff,x+xOff,y+yOff,col="white",border=NA)
        }
        # Plot background:
        rect(x-xOff,y-yOff,x+xOff,y+yOff,col=vcolor,border=NA, density = density, angle = angle)
        if (border)
        {
          rect(x-xOff,y-yOff,x+xOff,y+yOff,border=bcolor,lwd=border.width)
        }
      } else {
        # General case: approximate the node outline with 'resolution' border
        # points and draw it as a polygon:
        Coord <- lapply(seq(0,2*pi,length=resolution),function(r)Cent2Edge(x,y,r,cex1,cex2,shape, noPar = noPar))
        xs <- sapply(Coord,'[[',1)
        ys <- sapply(Coord,'[[',2)
        if (border) bord <- bcolor else bord <- NA
        if(bw) # Plot a white background behind each node to avoid transparency due to density in the next line
        {
          polygon(xs, ys, lwd=border.width, border = bord, col = "white")
        }
        # Draw the node:
        polygon(xs, ys, lwd=border.width, border = bord, col = vcolor, density = density, angle = angle)
        # Draw the pie diagram (a ring of width pieBorder around the node):
        if (!is.null(pie)){
          # If any element not in (0,1), stop:
          if(any(pie < 0 | pie > 1)) stop("All elements in 'pie' must be between 0 and 1.")
          # Check sum:
          if (sum(pie) > 1){
            stop("Sum of 'pie' argument may not be greater than 1 for any node.")
          }
          # Recycle colors to the number of pie parts:
          if (length(pie) != length(pieColor)){
            pieColor <- rep(pieColor,length=length(pie))
          }
          # Fill in NA colors:
          if (any(is.na(pieColor))){
            # if length = 1, inherit from node color but darker:
            if (length(pie) == 1){
              pieColor <- darken(vcolor,pieDarken)
            } else {
              if (equalPieColor){
                pieColor[is.na(pieColor)] <- darken(vcolor,pieDarken)
              } else {
                if (!pastel){
                  pieColor[is.na(pieColor)] <- rainbow(sum(is.na(pieColor)))
                } else {
                  pieColor[is.na(pieColor)] <- rainbow_hcl(sum(is.na(pieColor)), start = rainbowStart * 360, end = (360 * rainbowStart + 360*(sum(is.na(pieColor))-1)/sum(is.na(pieColor))))
                }
              }
            }
          }
          # If sum != 1, add one remainder part in pieColor2:
          if (sum(pie)!=1){
            pie <- c(pie,1-sum(pie))
            pieColor <- c(pieColor,pieColor2)
          }
          nPie <- length(pie)
          pie <- c(0,cumsum(pie))
          pie <- pieStart + pie # Shift pie diagram
          for (i in seq_len(nPie)){
            # Inner and outer arcs of this pie part:
            innerCoord <- lapply(seq(pie[i]*2*pi,pie[i+1]*2*pi,length=resolution),function(r)Cent2Edge(x,y,r,(1-pieBorder) * cex1,(1-pieBorder) * cex2,shape, noPar = noPar))
            innerXs <- sapply(innerCoord,'[[',1)
            innerYs <- sapply(innerCoord,'[[',2)
            outerCoord <- lapply(seq(pie[i]*2*pi,pie[i+1]*2*pi,length=resolution),function(r)Cent2Edge(x,y,r,cex1,cex2,shape, noPar = noPar))
            outerXs <- sapply(outerCoord,'[[',1)
            outerYs <- sapply(outerCoord,'[[',2)
            pie1Xs <- c(outerXs,rev(innerXs))
            pie1Ys <- c(outerYs,rev(innerYs))
            # Plot this pie part:
            polygon(pie1Xs, pie1Ys, lwd=border.width, border = bord, col = pieColor[i])
          }
        }
      }
    }
  } else if (shape == "rectangle")
  {
    xOff <- Cent2Edge(x,y,pi/2,cex1,cex2,shape, noPar = noPar)[1] - x
    yOff <- Cent2Edge(x,y,0,cex1,cex2,shape, noPar = noPar)[2] - y
    if(bw)
    {
      # Plot a white background behind each node to avoid transparency due to density in the next line
      rect(x-xOff,y-yOff,x+xOff,y+yOff,col="white",border=NA)
    }
    # Plot background:
    rect(x-xOff,y-yOff,x+xOff,y+yOff,col=vcolor,border=NA, density = density, angle = angle)
    if (border)
    {
      rect(x-xOff,y-yOff,x+xOff,y+yOff,border=bcolor,lwd=border.width)
    }
  } else if (shape %in% names(polygonList))
  {
    # Custom polygon shape: polygonList[[shape]] holds unit-square x/y vertices:
    xOff <- Cent2Edge(x,y,pi/2,cex1,cex2,"rectangle", noPar = noPar)[1] - x
    yOff <- Cent2Edge(x,y,0,cex1,cex2,"rectangle", noPar = noPar)[2] - y
    if (border) bord <- bcolor else bord <- NA
    if(bw)
    {
      # Plot a white background behind each node to avoid transparency due to density in the next line
      polygon(x + polygonList[[shape]]$x * xOff, y + polygonList[[shape]]$y * yOff, lwd=border.width, border = bord, col = "white")
    }
    polygon(x + polygonList[[shape]]$x * xOff, y + polygonList[[shape]]$y * yOff, lwd=border.width, border = bord, col = vcolor, density = density, angle = angle)
    # Draw the pie diagram (segments follow the polygon's vertex order):
    if (!is.null(pie)){
      if (pieStart != 0){
        stop("'pieStart' argument not supported when using shape = 'heart', shape = 'star' or shape = 'ellipse'")
      }
      # If any element not in (0,1), stop:
      if(any(pie < 0 | pie > 1)) stop("All elements in 'pie' must be between 0 and 1.")
      # Check sum:
      if (sum(pie) > 1){
        stop("Sum of 'pie' argument may not be greater than 1 for any node.")
      }
      # Recycle colors:
      if (length(pie) != length(pieColor)){
        pieColor <- rep(pieColor,length=length(pie))
      }
      # Fill in NA colors:
      if (any(is.na(pieColor))){
        # if length = 1, inherit from node color but darker:
        if (length(pie) == 1){
          pieColor <- darken(vcolor,pieDarken)
        } else {
          if (!pastel){
            pieColor[is.na(pieColor)] <- rainbow(sum(is.na(pieColor)))
          } else {
            pieColor[is.na(pieColor)] <- rainbow_hcl(sum(is.na(pieColor)), start = rainbowStart * 360, end = (360 * rainbowStart + 360*(sum(is.na(pieColor))-1)/sum(is.na(pieColor))))
          }
        }
      }
      # If sum != 1, add one remainder part in pieColor2:
      if (sum(pie)!=1){
        pie <- c(pie,1-sum(pie))
        pieColor <- c(pieColor,pieColor2)
      }
      nPie <- length(pie)
      pie <- c(0,cumsum(pie))
      pie <- pieStart + pie # Shift pie diagram
      # Map pie fractions to polygon vertex indices:
      Inds <- 1 + round(pie * (length(polygonList[[shape]]$x)-1))
      for (i in seq_len(nPie)){
        innerXs <- x + polygonList[[shape]]$x[Inds[i]:Inds[i+1]] * xOff * (1-pieBorder)
        innerYs <- y + polygonList[[shape]]$y[Inds[i]:Inds[i+1]] * yOff * (1-pieBorder)
        outerXs <- x + polygonList[[shape]]$x[Inds[i]:Inds[i+1]] * xOff
        outerYs <- y + polygonList[[shape]]$y[Inds[i]:Inds[i+1]] * yOff
        pie1Xs <- c(outerXs,rev(innerXs))
        pie1Ys <- c(outerYs,rev(innerYs))
        # Plot this pie part:
        polygon(pie1Xs, pie1Ys, lwd=border.width, border = bord, col = pieColor[i])
      }
    }
  } else stop(paste("Shape",shape,"is not supported or included in 'polygonList'."))

  ### ADD BARS ####
  if (!is.null(bars))
  {
    if (any(bars < 0) | any(bars > 1))
    {
      warning("Bar detected < 0 or > 1, unexpected results might occur.")
    }
    for (i in seq_along(bars))
    {
      IntInNode(t(c(x,y)),cex1,cex2,shape,bars[i],width=barLength,triangles=FALSE,col=barColor,barSide,!barsAtSide)
    }
  }

  ### Add mean/SD indicators in the same way (mean as a long tick, the
  ### mean +/- 2*SD interval as short ticks plus a connecting bar):
  if (!is.na(mean)){
    meanScaled <- (mean - meanRange[1]) / (meanRange[2] - meanRange[1])
    endScaled <- min(1,(mean + 2*SD - meanRange[1]) / (meanRange[2] - meanRange[1]))
    startScaled <- max(0,(mean - 2*SD - meanRange[1]) / (meanRange[2] - meanRange[1]))
    # Draw mean:
    IntInNode(t(c(x,y)),cex1,cex2,shape,meanScaled,width=0.5,triangles=FALSE,col=barColor,barSide,!barsAtSide)
    # Draw SD ends (only if inside the [0,1] range):
    if (endScaled < 1){
      IntInNode(t(c(x,y)),cex1,cex2,shape,endScaled,width=0.25,triangles=FALSE,col=barColor,barSide,!barsAtSide)
    }
    if (startScaled > 0){
      IntInNode(t(c(x,y)),cex1,cex2,shape,startScaled,width=0.25,triangles=FALSE,col=barColor,barSide,!barsAtSide)
    }
    # Draw horizontal bar connecting the interval ends:
    IntInNode(t(c(x,y)),cex1,cex2,shape,mean(c(startScaled,endScaled)),width=endScaled - startScaled,triangles=FALSE,col=barColor,barSide,!barsAtSide, flip=TRUE)
  }
}

# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
qgraph_layout_Cpp <- function(pniter, pvcount, pecount, maxdelta, parea, pcoolexp, prepulserad, Ef, Et, W, xInit, yInit, Cx, Cy, digits) {
    .Call(`_qgraph_qgraph_layout_Cpp`, pniter, pvcount, pecount, maxdelta, parea, pcoolexp, prepulserad, Ef, Et, W, xInit, yInit, Cx, Cy, digits)
}

# Print title text x in the top-left corner of the current plot region:
addTitle <- function(x, cex = 1)
{
  text(par('usr')[1] + (par('usr')[2] - par('usr')[1])/40 ,par("usr")[4] - (par('usr')[4] - par('usr')[3])/40,x, adj = c(0,1),cex=cex)
}

# z-score standardization that is safe for degenerate input: returns NA for
# all-NA input and a zero vector when the standard deviation is zero:
scale2 <- function(x)
{
  if (all(is.na(x))) return(NA)
  if (sd(x,na.rm=TRUE)!=0){
    return((x-mean(x,na.rm=TRUE))/sd(x,na.rm=TRUE))
  } else {
    return(rep(0, length(x)))
  }
}

# Extended BIC of the Gaussian graphical model implied by undirected graph
# 'adj', fitted to sample covariance matrix S with sample size n; gamma is
# the EBIC hyperparameter. Delegates the EBIC computation to EBIC() (defined
# elsewhere in the package).
EBICgraph <- function(
  S, # Sample covariance matrix
  adj, # adjacency matrix of undirected graph
  n, # Sample size
  gamma = 0.5)
{
  stopifnot(isSymmetric(adj))

  # Check for positive definite:
  if(any(eigen(S)$values < 0))
  {
    if (n > nrow(S))
    {
      warning("Correlation/covariance matrix is not positive definite, yet sample size is higher than number of variables. Finding nearest positive definite matrix")
      S <- as.matrix(Matrix::nearPD(S, keepDiag = TRUE, ensureSymmetry = TRUE)$mat)
    } else stop("Correlation/covariance matrix is not positive definite, and sample size is lower than or equal to the number of variables.")
  }

  diag(adj) <- 1
  # Compute zeroes (absent edges are constrained to zero partial covariance):
  zeroes <- which(adj==0,arr.ind=TRUE)
  # Fit network with glasso at penalty 0, constrained to the graph structure:
  if (nrow(zeroes)>0) res <- glasso::glasso(S, 0, zero=zeroes) else res <- glasso(S, 0)
  # Compute EBIC from the estimated precision matrix:
  C <- res$wi
  EBIC(S, C, n, gamma)
}
# Compute the extended BIC (EBIC) of an undirected Gaussian graphical model
# whose structure is fixed by an adjacency matrix.
#
# S     : sample covariance/correlation matrix
# adj   : adjacency matrix of the undirected graph (zeros = absent edges)
# n     : sample size
# gamma : EBIC hyperparameter (0.5 by default; 0 reduces EBIC to BIC)
#
# The model is fit without regularization (rho = 0) but with the absent
# edges constrained to zero, then scored with EBIC().
EBICgraph <- function(
  S,           # Sample covariance matrix
  adj,         # Adjacency matrix of undirected graph
  n,           # Sample size
  gamma = 0.5)
{
  stopifnot(isSymmetric(adj))

  # Check for positive definite; if not, repair when n exceeds the number
  # of variables, otherwise fitting is not possible:
  if (any(eigen(S)$values < 0))
  {
    if (n > nrow(S))
    {
      warning("Correlation/covariance matrix is not positive definite, yet sample size is higher than number of variables. Finding nearest positive definite matrix")
      S <- as.matrix(Matrix::nearPD(S, keepDiag = TRUE, ensureSymmetry = TRUE)$mat)
    } else stop("Correlation/covariance matrix is not positive definite, and sample size is lower than or equal to the number of variables.")
  }

  diag(adj) <- 1

  # Indices of edges constrained to zero:
  zeroes <- which(adj == 0, arr.ind = TRUE)

  # Fit network (consistency fix: use the namespaced glasso::glasso call in
  # both branches; the original called bare glasso() when no zeroes existed):
  if (nrow(zeroes) > 0)
  {
    res <- glasso::glasso(S, 0, zero = zeroes)
  } else {
    res <- glasso::glasso(S, 0)
  }

  # Estimated precision matrix:
  C <- res$wi

  # Return EBIC of the fitted model:
  EBIC(S, C, n, gamma)
}
# Plot centrality indices of one or more networks as a ggplot2 chart
# (one facet column per measure, optionally one facet row per graph).
#
# ...        : graphs accepted by centralityTable()/getWmat()
# labels     : optional node labels
# scale      : x-axis scaling; "raw0" additionally forces the axis to start at 0
# include    : measures to show; "all"/"All" expands to every supported measure
# theme_bw   : apply ggplot2::theme_bw()
# print      : print the plot and return it invisibly; otherwise return it visibly
# verbose    : emit a note about non-raw scaling
# standardized, relative : deprecated; derived from 'scale' unless supplied
# weighted, signed       : forwarded to centralityTable()
# orderBy    : "default" (mixed-sorted node names) or one measure to order nodes by
# decreasing : direction of the node ordering
#
# Returns a ggplot object.
centralityPlot <- function(..., labels, scale = c("raw0","raw","z-scores", "relative"),
                           include = c("Degree","Strength","OutDegree","InDegree","OutStrength","InStrength"),
                           theme_bw = TRUE, print = TRUE, verbose = TRUE,
                           standardized, relative, weighted = TRUE, signed = TRUE,
                           orderBy = "default", # Can also be one of the measures
                           decreasing = FALSE
){
  # "all"/"All" is shorthand for the full set of measures:
  if (any(include=="all") | any(include=="All")){
    include <- c("Degree","Strength","OutDegree","InDegree","OutStrength","InStrength","Closeness","Betweenness", "ExpectedInfluence","OutExpectedInfluence","InExpectedInfluence")
  }
  scale <- match.arg(scale)

  # Deprecated arguments take precedence only when explicitly supplied:
  if (!missing(standardized)){
    warning("'standardized' argument is deprecated and will be removed.")
  } else {
    standardized <- scale == "z-scores"
  }

  if (!missing(relative)){
    warning("'relative' argument is deprecated and will be removed.")
  } else {
    relative <- scale == "relative"
  }

  if (scale == "z-scores"){
    if (verbose) message("Note: z-scores are shown on x-axis rather than raw centrality indices.")
  }
  if (scale == "relative"){
    if (verbose) message("Note: relative centrality indices are shown on x-axis rather than raw centrality indices.")
  }

  # Dummies to silence R CMD check NOTEs about non-standard evaluation:
  measure <- NULL
  value <- NULL
  node <- NULL
  type <- NULL

  # Long-format table: one row per graph/type/node/measure combination:
  Long <- centralityTable(..., standardized=standardized,  labels=labels, relative=relative, weighted = weighted, signed = signed)

  # Keep only the requested measures, in the requested panel order:
  Long <- subset(Long, measure %in% include)

  # Re-order:
  Long$measure <- factor(Long$measure,levels = include)

  # Node ordering: mixed-sort the names, or sort by the mean of one measure:
  if (orderBy == "default"){
    nodeLevels <- unique(gtools::mixedsort(as.character(Long$node), decreasing = decreasing))
  } else {
    nodeLevels <- names(sort(tapply(Long$value[Long$measure == orderBy],Long$node[Long$measure == orderBy],mean), decreasing=decreasing))
  }

  Long$node <- factor(as.character(Long$node), levels = nodeLevels)
  Long <- Long[gtools::mixedorder(Long$node),]

  # PLOT: colour by type only when several input types are present:
  if (length(unique(Long$type)) > 1)
  {
    g <- ggplot(Long, aes(x = value, y = node, group = type, colour = type))
  } else {
    g <- ggplot(Long, aes(x = value, y = node, group = type))
  }
  g <- g + geom_path() + xlab("") + ylab("") + geom_point()

  # One facet row per graph when comparing several graphs:
  if (length(unique(Long$graph)) > 1)
  {
    g <- g + facet_grid(graph ~ measure, scales = "free")
  } else {
    g <- g + facet_grid( ~ measure, scales = "free")
  }
  if (theme_bw){
    g <- g + theme_bw()
  }
  if (scale == "raw0"){
    g <-g + xlim(0,NA)
  }
  if (print){
    print(g)
    invisible(g)
  } else {
    return(g)
  }
}
# Compute node centrality measures of a (weighted, possibly directed) graph.
#
# graph  : a qgraph object, weights matrix, or anything getWmat() understands
# alpha  : Opsahl et al. (2010) tuning parameter blending unweighted degree
#          (alpha = 0) and strength (alpha = 1); also exponentiates the
#          weights used in the distance matrix for closeness/betweenness
# posfun : function mapping weights to nonnegative values usable as
#          inverse distances (default abs)
# pkg    : "igraph" (compiled routines) or "qgraph" (pure-R Dijkstra)
# all.shortest.paths : also enumerate all shortest paths (igraph branch;
#          can be slow and memory hungry on large graphs)
# weighted, signed : set to FALSE to binarize / take absolute weights first
# R2     : additionally return per-node predictability (R^2); only valid
#          when the input is a Gaussian graphical model
#
# Returns a list with Out/InDegree, Closeness, Betweenness, expected
# influence, shortest path lengths, shortest paths, and optionally R2.
centrality <- function(graph,alpha=1,posfun=abs,pkg = c("igraph","qgraph"),all.shortest.paths=FALSE, weighted = TRUE, signed = TRUE, R2 = FALSE)
{
  # qgraph objects flagged as unweighted get their edge weights binarized.
  # NOTE(review): the archived text is ambiguous on whether this line is
  # active; it is rendered active here — confirm against upstream source.
  if (!is.null(graph[['graphAttributes']][['Graph']][['weighted']])) if (!graph[['graphAttributes']][['Graph']][['weighted']]) graph[['Edgelist']][['weight']] <- ifelse(graph[['Edgelist']][['weight']]==0,0,1)

  W <- getWmat(graph)

  if (!isTRUE(weighted)){
    W <- sign(W)
  }
  if (!isTRUE(signed)){
    W <- abs(W)
  }

  pkg <- match.arg(pkg)

  # If input resolves to a list of weights matrices, recurse over it.
  # NOTE(review): weighted/signed/pkg/R2 are not forwarded here — confirm
  # whether that is intentional.
  if (is.list(W))
  {
    return(lapply(W,centrality, alpha=alpha,posfun=posfun))
  }

  n <- nrow(W)

  # Self-loops are not included in centrality analysis:
  if (any(diag(W)!=0))
  {
    diag(W) <- 0
  }

  ## Compute binary adjacency:
  X <- 1L * (W!=0)

  ## Degree/strength (Opsahl et al. combination via alpha):
  UnweightedDegreesOut <- rowSums(X)
  WeightedDegreesOut <- rowSums(posfun(W))
  CombinedDegreesOut <- UnweightedDegreesOut^(1-alpha) * WeightedDegreesOut^alpha

  UnweightedDegreesIn <- colSums(X)
  WeightedDegreesIn <- colSums(posfun(W))
  CombinedDegreesIn <- UnweightedDegreesIn^(1-alpha) * WeightedDegreesIn^alpha

  # Expected influence: plain signed column/row sums:
  InExpectedInfluence <- colSums(W)
  OutExpectedInfluence <- rowSums(W)

  # Distance matrix: inverse transformed weights; absent edges become Inf:
  DistMat <- 1/(ifelse(posfun(W)==0,0,posfun(W)^alpha))

  if (pkg=="igraph"){
    # Closeness on the full distance matrix:
    igraphObject <- igraph::graph.adjacency(DistMat, weighted = TRUE, mode = "directed")

    Closeness <- igraph::closeness(igraphObject)

    # Rebuild the graph from an edge list with 1/weight distances and drop
    # absent edges (Inf distance) before computing betweenness/paths:
    E <- cbind(c(row(W)),c(col(W)),c(posfun(W)))
    igraphObject <- igraph::graph_from_edgelist(E[,1:2, drop=FALSE],directed=TRUE)
    E(igraphObject)$weight <- 1/E[,3]
    igraphObject <- igraph::delete_edges(igraphObject, which(E(igraphObject)$weight == Inf))
    Betweenness <- igraph::betweenness(igraphObject,cutoff = 1/1e-10)
    ShortestPaths <- igraph::shortest.paths(igraphObject, mode = "out")

    # n x n list-matrix of shortest paths (filled only on request):
    ls <- vector("list",n^2)
    Paths <- structure( ls, .Dim = c(n, n))

    if (all.shortest.paths){
      for (i in 1:n)
      {
        allPaths <- lapply(igraph::all_shortest_paths(igraphObject,i,V(igraphObject))$res,as.numeric)
        last <- sapply(allPaths,function(x)x[length(x)])
        for (j in 1:n)
        {
          if (i==j){
            Paths[[i,j]] <- list()
          } else {
            Paths[[i,j]] <- allPaths[last==j]
          }
        }
      }
    }
  } else {
    # Compute shortest distances using Dijkstra (based on the pseudo code
    # on Wikipedia).

    # Setup:
    ShortestPaths <- matrix(Inf,n,n)
    ls <- list()
    for (i in 1:n^2) ls[[i]] <- numeric(0)
    # Previous[[source, v]] holds the predecessor(s) of v on optimal paths:
    Previous <- structure(ls, .Dim = c(n, n))

    # Main loop, one Dijkstra run per source node:
    for (source in 1:n)
    {
      dist <- rep(Inf,n)
      dist[source] <- 0  # Distance from source to source
      Q <- 1:n  # All nodes in the graph are unoptimized - thus are in Q
      while (length(Q) > 0) # The main loop
      {
        u <- Q[which.min(dist[Q])]
        if (dist[u] == Inf) break # all remaining vertices are inaccessible from source
        Q <- Q[- which(Q==u)]

        for (v in Q) # where v has not yet been removed from Q.
        {
          alt <- dist[u] + DistMat[u,v]
          if (alt < dist[v]) # Relax (u,v,a)
          {
            dist[v] <- alt
            # Record every predecessor achieving the new optimum:
            Previous[[source,v]] <- which(dist + DistMat[,v] == alt)
          }
        }
      }
      ShortestPaths[source,] <- dist
    }

    # Compute Closeness (inverse farness):
    Closeness <- 1/rowSums(ShortestPaths)

    # Recursively expand predecessor lists into explicit shortest paths:
    sp <- function(i,j)
    {
      if (length(Previous[[i,j]])==0) return(list())
      if (all(Previous[[i,j]] == i)) return(list(c(i,j)))
      paths <- do.call(c,lapply(Previous[[i,j]],sp,i=i))
      paths <- lapply(paths,function(x)c(x,j))
      return(paths)
    }

    # Compute shortest paths:
    Paths <- structure(ls, .Dim = c(n, n))
    for (i in 1:n)
    {
      for (j in 1:n)
      {
        Paths[[i,j]] <- sp(i,j)
      }
    }

    # Number of shortest paths:
    NumPaths <- apply(Paths,1:2,sapply,length)

    # Betweenness: fraction of shortest paths per pair that pass through i:
    Betweenness <- numeric(n)
    Gtot <- apply(Paths,1:2,sapply,length)

    for (i in 1:n)
    {
      G <- apply(Paths,1:2,sapply,function(x)sum(i==unlist(x)))
      Grat <- G[-i,-i]/Gtot[-i,-i]
      Betweenness[i] <- sum(Grat[!is.nan(Grat)])
    }
  }

  # Helper: attach node labels to vectors/matrices:
  lab <- function(x,labs){
    if (is.vector(x)){
      names(x) <- labs
    } else {
      rownames(x) <- colnames(x) <- labs
    }
    return(x)
  }
  Labels <- colnames(W)

  # R2 (predictability); only defined for a Gaussian graphical model:
  if (R2){
    # check if the matrix could be a GGM:
    W <- as.matrix(W)
    diag(W) <- 0
    K <- diag(n) - W
    rownames(K) <- colnames(K) <- NULL
    if (any(K < -1) || any(K > 1) || !all(K == t(K)) || any(eigen(K)$values < 0)){
      stop("Graph does not look like a Gaussian graphical model. R2 is only supported for a Gaussian graphical model.")
    }

    # translate to precision matrix of standardized data:
    K <- solve(cov2cor(solve(K)))

    # R^2 is simply...
    R2_res <- 1 - 1 / diag(K)
    names(R2_res) <- Labels
  }

  ### RETURN VALUES:
  retval <- list(
    OutDegree = lab(CombinedDegreesOut,Labels),
    InDegree = lab(CombinedDegreesIn,Labels),
    Closeness = lab(Closeness,Labels),
    Betweenness = lab(Betweenness,Labels),
    InExpectedInfluence = InExpectedInfluence,
    OutExpectedInfluence = OutExpectedInfluence,
    ShortestPathLengths = lab(ShortestPaths,Labels),
    ShortestPaths = lab(Paths,Labels))

  if (R2){
    retval$R2 <- R2_res
  }
  return(retval)
}
If not empty and input is qgraph object qgraph is called again # ){ # if (is(input,"qgraph")){ # if (length(list(...)) > 0){ # qgraphObject <- qgraph(input, ..., DoNotPlot = TRUE) # } else { # qgraphObject <- input # } # } else { # qgraphObject <- qgraph(input, ..., DoNotPLot = TRUE) # } # # # # Construct edge dataframe: # Links <- data.frame( # source = qgraphObject$Edgelist$from-1, # target = qgraphObject$Edgelist$to-1, # value = abs(qgraphObject$Edgelist$weight)/ max(abs(qgraphObject$Edgelist$weight)), # color = colstripalpha(qgraphObject$graphAttributes$Edges$color), # width = qgraphObject$graphAttributes$Edges$width # ) # # Links <- Links[qgraphObject$graphAttributes$Graph$edgesort,] # # # HTML file name: # Filename <- paste0(D3file,".html") # # Nodes <- data.frame( # name = qgraphObject$graphAttributes$Nodes$labels, # color = colstripalpha(qgraphObject$graphAttributes$Nodes$color), # bcolor = colstripalpha(qgraphObject$graphAttributes$Nodes$border.color), # bwidth = qgraphObject$graphAttributes$Nodes$border.width, # group = 1 # ) # # # Create D3network: # d3ForceNetwork(Links = Links, Nodes = Nodes, Source = "source", # Target = "target", Value = "value", NodeID = "name", # Group = "group", # opacity = "1", file = Filename, # linkWidth = "LINKREPLACEDUMMY", # width = D3width,height =D3height) # # # # Read the created html: # lines <- readLines(Filename) # linksLine <- grep("var links =", lines) # nodesLine <- grep("var nodes =", lines) # # # Replace links and ndoes with proper links: # lines[linksLine] <- paste("var links =", toJSONarray(Links), "; \n") # lines[nodesLine] <- paste("var nodes =", toJSONarray(Nodes), "; \n") # # # Find var link: # linkAttrLine <- grep('LINKREPLACEDUMMY', lines) # # # Append: # lines[linkAttrLine] <- # '.style("stroke", function(d) {return d.color;}) # .style("stroke-width", function(d) {return d.width;})' # # # # NODES # nodesAttrLine <- grep('.style("fill", function(d) { return color(d.group); })', lines, fixed = TRUE) # # # 
# Average the (rescaled, absolute) weights matrices of multiple graphs.
# Nested lists of graphs are averaged recursively; each matrix is first
# normalized to max |weight| = 1 so every graph contributes equally.
averageWmat <- function(...)
{
  dotList <- list(...)

  # Get W mats:
  Wmats <- lapply(dotList,getWmat)

  # Replace list with averaged Wmats, and rescale:
  for (i in seq_along(Wmats))
  {
    if (is.list(Wmats[[i]]))
    {
      Wmats[[i]] <- do.call(averageWmat,Wmats[[i]])
    }
    # Normalize to unit maximum; an all-zero matrix stays all-zero:
    if (!all(Wmats[[i]]==0)){
      Wmats[[i]] <- abs(Wmats[[i]]/max(abs(Wmats[[i]])))
    } else Wmats[[i]][] <- 0
  }

  # Dimension check. NOTE(review): the `|` only stops when BOTH row and
  # column counts differ across graphs; for square weights matrices this is
  # equivalent to `&`, but confirm that intent upstream.
  if (!(length(unique(sapply(Wmats,nrow))) == 1 | length(unique(sapply(Wmats,ncol))) == 1 )) stop("Graphs of different dimensions")

  # Element-wise mean of the normalized matrices:
  avgWmat <- Reduce('+',Wmats)/length(Wmats)

  return(avgWmat)
}

# Compute a single layout from the averaged weights of multiple graphs, so
# that several related networks can be plotted with identical node positions.
# Returns the layout matrix of the qgraph call (plot suppressed).
averageLayout <- function(..., layout = "spring", repulsion = 1, layout.par)
{
  avgWmat <- averageWmat(...)
  # Default spring repulsion radius scales with network size:
  if (missing(layout.par)){
    layout.par <- list(repulse.rad = ncol(avgWmat)^(repulsion * 3))
  }
  Q <- qgraph(avgWmat, DoNotPlot = TRUE, layout = layout, layout.par = layout.par)
  return(Q$layout)
}
# Automatically compute node and edge centrality of a network, adapting the
# set of indices to whether the network is weighted and/or directed:
# - unweighted & directed   : In/OutDegree, betweenness, closeness, EI
# - weighted & directed     : In/OutStrength, betweenness, closeness, EI
# - unweighted & undirected : Degree, betweenness, closeness, EI
# - weighted & undirected   : Strength, betweenness, closeness, EI
# Closeness is computed on the largest connected component only when the
# network is disconnected. Also returns edge betweenness (igraph) and the
# matrix of shortest path lengths.
#
# x        : adjacency/weights matrix (or anything getWmat() understands)
# weighted : FALSE binarizes the weights first
# signed   : FALSE takes absolute weights first
centrality_auto <- function(x, weighted = TRUE, signed = TRUE)
{
  # Compute weights matrix:
  x <- getWmat(x)

  # If list of matrices, return list of output:
  if (is.list(x))
  {
    return(lapply(x, centrality_auto, weighted = weighted, signed = signed))
  }

  # Make unweighted or unsigned:
  if (!isTRUE(weighted)){
    x <- sign(x)
  }
  if (!isTRUE(signed)){
    x <- abs(x)
  }

  if(!is.matrix(x)) stop("the input network must be an adjacency or weights matrix")
  diag(x) <- 0 # loops are not included in centrality analysis

  # Detect whether the graph is directed / weighted:
  directed.gr <- ifelse(isSymmetric.matrix(object = x, tol = 1e-12), FALSE, TRUE)
  weighted.gr <- ifelse(all(mat2vec(x) %in% c(0, 1)), FALSE, TRUE)

  # Node centrality via qgraph: InDegree, OutDegree, Closeness, Betweenness:
  net_qg <- qgraph(x, diag = FALSE, labels = colnames(x), DoNotPlot = TRUE, minimum = 0)
  centr <- centrality(net_qg)

  # Betweenness is divided by two for undirected networks (each path is
  # otherwise counted in both directions):
  if (directed.gr & !weighted.gr)
    centr1 <- data.frame(cbind("Betweenness" = centr$Betweenness, "Closeness" = centr$Closeness, "InDegree" = centr$InDegree, "OutDegree" = centr$OutDegree,
                               "OutExpectedInfluence" = centr$OutExpectedInfluence, "InExpectedInfluence" = centr$InExpectedInfluence))
  if (directed.gr & weighted.gr)
    centr1 <- data.frame(cbind("Betweenness" = centr$Betweenness, "Closeness" = centr$Closeness, "InStrength" = centr$InDegree, "OutStrength" = centr$OutDegree,
                               "OutExpectedInfluence" = centr$OutExpectedInfluence, "InExpectedInfluence" = centr$InExpectedInfluence))
  if (!directed.gr & !weighted.gr)
    centr1 <- data.frame(cbind("Betweenness" = centr$Betweenness / 2, "Closeness" = centr$Closeness, "Degree" = centr$OutDegree,
                               "ExpectedInfluence" = centr$OutExpectedInfluence))
  if (!directed.gr & weighted.gr)
    centr1 <- data.frame(cbind("Betweenness" = centr$Betweenness / 2, "Closeness" = centr$Closeness, "Strength" = centr$OutDegree,
                               "ExpectedInfluence" = centr$OutExpectedInfluence))
  row.names(centr1) <- colnames(x)

  # Identify the largest connected component:
  log <- capture.output({
    graph <- igraph::graph.adjacency(1 * (x != 0), mode = ifelse(directed.gr, "directed", "undirected"))
    comps <- igraph::components(graph)
    largcomp <- comps$membership == which.max(comps$csize)
  })

  # If the largest component is smaller than the network, closeness is
  # recomputed only on the largest component (NA elsewhere). FIX: the
  # archived text of this condition was garbled ("if(sum(largcomp) 1)");
  # restored to the intended bounds check.
  if (sum(largcomp) < ncol(x) & sum(largcomp) > 1)
  {
    x2 <- x[largcomp, largcomp]
    clos <- centrality(qgraph(x2, diag = FALSE, labels = colnames(x)[largcomp], DoNotPlot = TRUE, minimum = 0))$Closeness
    centr1$Closeness[largcomp] <- clos
    centr1$Closeness[!largcomp] <- NA
  }

  # Compute edge betweenness with package igraph; weighted graphs use
  # inverse absolute weights as distances:
  if (weighted.gr){
    igraphinput <- abs(1 / x)
  } else {
    igraphinput <- x
  }
  net_ig_abs <- igraph::graph.adjacency(adjmatrix = igraphinput, mode = ifelse(directed.gr, "directed", "undirected"), weighted = (if (weighted.gr) TRUE), diag = FALSE)

  # Edge betweenness centrality, sorted from most to least central edge:
  edgebet <- edge.betweenness(graph = net_ig_abs, directed = directed.gr)
  el <- data.frame(get.edgelist(graph = net_ig_abs), stringsAsFactors = FALSE)
  edgebet <- merge(el, edgebet, by = 0)
  edgebet$Row.names <- NULL
  names(edgebet) <- c("from", "to", "edgebetweenness")
  edgebet <- edgebet[order(edgebet$edgebetweenness, decreasing = TRUE), ]

  # Shortest path lengths:
  ShortestPathLengths <- centr$ShortestPathLengths
  rownames(ShortestPathLengths) <- colnames(ShortestPathLengths) <- colnames(x)

  Res <- list("node.centrality" = centr1, "edge.betweenness.centrality" = edgebet, "ShortestPathLengths" = ShortestPathLengths)
  class(Res) <- c("list", "centrality_auto")
  return(Res)
}
# Compute several clustering coefficients, adapting to whether the network is
# weighted and/or signed:
# - clustWS / signed_clustWS       : Watts & Strogatz (1998) and its signed
#                                    generalization (Costantini & Perugini)
# - clustZhang / signed_clustZhang : Zhang & Horvath (2005) and signed version
# - clustOnnela / signed_clustOnnela : Onnela et al. (2005) and signed version
# - clustBarrat                    : Barrat et al. (2004)
#
# x           : symmetric adjacency/weights matrix (or getWmat()-compatible)
# thresholdWS : threshold used to binarize the network for clustWS
# thresholdON : threshold below which edges are ignored for clustOnnela
clustcoef_auto <- function(x, thresholdWS = 0, thresholdON = 0)
{
  # Compute weights matrix:
  x <- getWmat(x)

  # If list of matrices, return list of output.
  # FIX: the recursive call previously forwarded thresholdWS as thresholdON
  # (`thresholdON=thresholdWS`), silently using the wrong threshold for the
  # Onnela coefficient on list input:
  if (is.list(x))
  {
    return(lapply(x, clustcoef_auto, thresholdWS = thresholdWS, thresholdON = thresholdON))
  }

  # Check adjacency matrix (mostly borrowed from WGCNA::checkAdjMat):
  dim = dim(x)
  if (is.null(dim) || length(dim) != 2)
    stop("adjacency is not two-dimensional")
  if (!is.numeric(x))
    stop("adjacency is not numeric")
  if (dim[1] != dim[2])
    stop("adjacency is not square")
  if (max(abs(x - t(x)), na.rm = TRUE) > 1e-12)
    stop("adjacency is not symmetric")
  # Rescale weights outside [-1, 1]:
  if (min(x, na.rm = TRUE) < -1 || max(x, na.rm = TRUE) > 1)
    x <- x / max(abs(x))

  # Detect whether the graph is weighted / signed:
  weighted.gr <- ifelse(all(abs(x) %in% c(0, 1)), FALSE, TRUE)
  signed.gr <- ifelse(all(x >= 0), FALSE, TRUE)

  # Barrat clustering coefficient (on absolute weights, via igraph):
  net_ig <- graph.adjacency(adjmatrix = abs(x), mode = "undirected", weighted = (if (weighted.gr) TRUE), diag = FALSE)
  cb <- transitivity(net_ig, type = "barrat", isolates = "zero")

  # The other measures of clustering coefficient:
  cw <- clustWS(x, thresholdWS)
  cz <- clustZhang(x)
  co <- clustOnnela(x, thresholdON)

  # Only report the measures that are meaningful for this kind of network:
  if (!signed.gr & !weighted.gr)
    output <- cbind("clustWS" = cw[, 1])
  if (!signed.gr & weighted.gr)
    output <- cbind("clustWS" = cw[, 1], "clustZhang" = cz[, 1], "clustOnnela" = co[, 1], "clustBarrat" = cb)
  if (signed.gr & !weighted.gr)
    output <- cbind("clustWS" = cw[, 1], "signed_clustWS" = cw[, 2])
  if (signed.gr & weighted.gr)
    output <- cbind("clustWS" = cw[, 1], "signed_clustWS" = cw[, 2], "clustZhang" = cz[, 1], "signed_clustZhang" = cz[, 2], "clustOnnela" = co[, 1], "signed_clustOnnela" = co[, 2], "clustBarrat" = cb)

  # Nodes for which a coefficient cannot be computed get zero instead of NaN:
  output[is.na(output)] <- 0
  Res <- data.frame(output)
  class(Res) <- c("data.frame", "clustcoef_auto")
  rownames(Res) <- colnames(x)
  return(Res)
}
# Onnela et al. (2005) weighted clustering coefficient, plus the signed
# generalization by Costantini & Perugini.
# x           : weights matrix (or anything getWmat() understands)
# thresholdON : edges with |w| <= thresholdON are ignored
# Returns a data.frame with columns "clustOnnela" (unsigned) and
# "signed_clustOnnela".
# FIX: the archived text of this function was garbled
# ("W[abs(W)=0]<-a_W13[W>=0]"); the threshold step and the construction of
# the signed cube-root matrix W13 are restored below.
clustOnnela <- function(x, thresholdON = 0)
{
  # Compute weights matrix:
  W <- getWmat(x)

  # If list of matrices, return list of output:
  if (is.list(W))
  {
    return(lapply(W, clustOnnela, thresholdON = thresholdON))
  }

  threshold <- thresholdON
  diag(W) <- 0
  # Drop sub-threshold edges (<= keeps this consistent with the strict
  # > threshold used for the degree matrix A below):
  W[abs(W) <= threshold] <- 0

  # Cube roots of absolute weights, and a sign-preserving version:
  a_W13 <- abs(W)^(1/3)
  W13 <- matrix(0, nrow = nrow(W), ncol = ncol(W))
  W13[W >= 0] <- a_W13[W >= 0]
  W13[W < 0] <- (abs(W[W < 0])^(1/3)) * (-1)

  # Numerators: weighted triangle counts (signed and unsigned):
  num <- diag(W13 %*% W13 %*% W13)
  a_num <- diag(a_W13 %*% a_W13 %*% a_W13)

  # Denominator: k*(k-1) from the unweighted degree on the thresholded graph:
  A <- matrix(0, nrow = nrow(W), ncol = ncol(W))
  A[abs(W) > threshold] <- 1
  ki <- colSums(A)
  den <- ki * (ki - 1)

  cO <- num / den
  a_cO <- a_num / den
  data.frame(cbind("clustOnnela" = a_cO, "signed_clustOnnela" = cO))
}
A<-matrix(0, nrow=nrow(W), ncol=ncol(W)) A[abs(W)>threshold]<-1 ki<-colSums(A) den<-ki*(ki-1) cO<-num/den a_cO<-a_num/den data.frame(cbind("clustOnnela"=a_cO, "signed_clustOnnela"=cO)) } ################################## # evaluation of smallworldness # ################################ # Exported: smallworldness<-function(x, B=1000, up=.995, lo=.005) { #compute the small worldness of Humphries & Gurney (2008) # require(igraph) # require(sna) # Compute weights matrix: x <- getWmat(x) # If list of matrices, return list of output: if (is.list(x)) { return(lapply(x, smallworldness, B=B, up=up, lo=lo)) } # consider only the adjacency matrix A<-x!=0 # transitivity of A A<-graph.adjacency(A, mode="undirected", diag=F, weighted=NULL) N<-vcount(A) m<-ecount(A) clusttrg<-transitivity(A, type="global", isolates="zero") lengthtrg<-average.path.length(graph=A, directed=F, unconnected=F) #generate B rnd networks with the same degree distribution of A deg.dist<-igraph::degree(A, mode="all", loops=F) rndA<-lapply(1:B, function(x)degree.sequence.game(deg.dist, method="simple.no.multiple")) # compute the average (global) clustering coefficient over the B random networks clustrnd<-sapply(rndA, transitivity, type="global", isolates="zero") clustrnd_m<-mean(clustrnd) # compute the upper and lower quantiles clustrnd_lo<-quantile(clustrnd, lo) clustrnd_up<-quantile(clustrnd, up) # compute the average shortest path length in random networks, the shortest path # length among unconnected nodes is computed as N, i.e., 1 plus the max possible path length lengthrnd<-sapply(rndA, average.path.length, directed=F, unconnected=F) lengthrnd_m<-mean(lengthrnd) # compute the upper and lower quantiles lengthrnd_lo<-quantile(lengthrnd, lo) lengthrnd_up<-quantile(lengthrnd, up) # compute humphries&gourney(2008) smallworld-ness sigma<-(clusttrg/clustrnd_m)/(lengthtrg/lengthrnd_m) c("smallworldness"=sigma, "trans_target"=clusttrg, "averagelength_target"=lengthtrg, "trans_rnd_M"=clustrnd_m, 
# Ensure every element of x has a usable name: elements whose name is
# missing or empty get `name` followed by their position.
fixnames <- function(x, name = "")
{
  fallback <- paste0(name, seq_along(x))
  nms <- names(x)
  if (is.null(nms))
  {
    return(fallback)
  }
  ifelse(nms == '', fallback, nms)
}

### Draws a single edge along the polyline (x, y), optionally with arrowheads.
drawEdge <- function(x, y, col = 1, lwd = 1, arrowlwd = 1, lty = 1,
                     directed = FALSE, bidirectional = FALSE, arrows = TRUE,
                     arrowAngle = pi/6, open = FALSE)
{
  stopifnot(length(x) == length(y))
  n <- length(x)
  xRange <- c(x[1], x[n])
  yRange <- c(y[1], y[n])
  stopifnot(n > 1)

  # When arrowheads are drawn, trim the curve so it ends at the arrow base:
  if (directed && isTRUE(arrows))
  {
    # Radius of the arrowhead in inches:
    arrowRad <- ArrowRadIn(angle = arrowAngle, cex = arrowlwd)

    # Distance (in inches) of every curve point to the end point:
    endDist <- sqrt((usr2inX(x) - usr2inX(xRange[2]))^2 +
                      (usr2inY(y) - usr2inY(yRange[2]))^2)

    # Keep only the part of the curve outside the arrowhead:
    keep <- rev(cumsum(rev(endDist > arrowRad)) > 0)
    x <- x[keep]
    y <- y[keep]

    # Append the arrow midpoint so the line meets the arrow base:
    mid <- ArrowMidPoint(xRange[2], yRange[2],
                         atan2usr2in(xRange[2] - x[length(x)],
                                     yRange[2] - y[length(y)]),
                         angle = arrowAngle, cex = arrowlwd)
    x <- c(x, mid[1])
    y <- c(y, mid[2])

    if (bidirectional)
    {
      # Same trimming at the start of the curve:
      startDist <- sqrt((usr2inX(x) - usr2inX(xRange[1]))^2 +
                          (usr2inY(y) - usr2inY(yRange[1]))^2)
      keep <- cumsum(startDist > arrowRad) > 0
      x <- x[keep]
      y <- y[keep]

      mid <- ArrowMidPoint(xRange[1], yRange[1],
                           atan2usr2in(xRange[1] - x[1], yRange[1] - y[1]),
                           angle = arrowAngle, cex = arrowlwd)
      x <- c(mid[1], x)
      y <- c(mid[2], y)
    }

    n <- length(x)
  }

  # Draw the edge itself:
  lines(x, y, lwd = lwd, col = col, lty = lty)

  # Draw the arrowhead(s):
  if (directed)
  {
    if (!is.logical(arrows))
    {
      # Numeric 'arrows': draw that many arrowheads spread along the edge.
      if (n > 2)
      {
        Ax <- seq(1, n, length = arrows + 2)
        Ay <- seq(1, n, length = arrows + 2)
        for (i in 2:(arrows + 1))
        {
          DrawArrow(x[Ax[i] + 1], y[Ay[i] + 1],
                    atan2usr2in(x[Ax[i] + 1] - x[Ax[i]],
                                y[Ay[i] + 1] - y[Ay[i]]),
                    angle = arrowAngle, cex = arrowlwd, open = open,
                    lwd = max(lwd/2, 1), lty = lty, col)
        }
      } else
      {
        Ax <- seq(x[1], x[n], length = arrows + 2)
        Ay <- seq(y[1], y[n], length = arrows + 2)
        # Original used `1:arrows+1`, which is the same index set 2:(arrows+1):
        for (i in 2:(arrows + 1))
        {
          DrawArrow(Ax[i], Ay[i],
                    atan2usr2in(Ax[i] - x[1], Ay[i] - y[1]),
                    angle = arrowAngle, cex = arrowlwd, open = open,
                    lwd = max(lwd/2, 1), lty = lty, col)
        }
      }
    } else if (arrows)
    {
      # Single arrowhead at the end point:
      DrawArrow(xRange[2], yRange[2],
                atan2usr2in(xRange[2] - x[n - 1], yRange[2] - y[n - 1]),
                angle = arrowAngle, cex = arrowlwd, open = open,
                lwd = max(lwd/2, 1), lty = lty, col)
      if (bidirectional)
      {
        DrawArrow(xRange[1], yRange[1],
                  atan2usr2in(xRange[1] - x[2], yRange[1] - y[2]),
                  angle = arrowAngle, cex = arrowlwd, open = open,
                  lwd = max(lwd/2, 1), lty = lty, col)
      }
    }
  }
}

# Serialize a data frame as a JSON array of row objects.
# Copied/adapted from the d3Network package:
# https://github.com/christophergandrud/d3Network/blob/cc224daa983e59dbe743c18e1ccb621bdd0ce509/R/utils.R
toJSONarray <- function(dtf)
{
  cols <- colnames(dtf)

  # Format one column as '"name" : value', quoting non-numeric values:
  fmtColumn <- function(cn)
  {
    q <- if (!is(dtf[, cn], "numeric") && !is(dtf[, cn], "integer")) '"' else ''
    paste('"', cn, '" : ', q, dtf[, cn], q, sep = '')
  }

  cells <- sapply(cols, fmtColumn)
  rows <- apply(cells, 1, function(x) paste(x, collapse = ', '))
  rows <- paste('{', rows, '}')
  paste('[', paste(rows, collapse = ', '), ']')
}
# Threshold a (partial) correlation network using local false discovery
# rates (fdrtool). Edges whose statistic exceeds `cutoff` are removed.
#
# net:    symmetric, positive-definite (partial) correlation matrix.
# cutoff: threshold applied to the chosen statistic.
# method: which fdrtool output to threshold on: 'lfdr', 'pval' or 'qval'
#         (edges with a value HIGHER than cutoff are removed).
FDRnetwork <- function(
    net, # Correlation or partial correlation matrix
    cutoff = 0.1, # Cutoff value for lfdr
    method = c('lfdr', 'pval', 'qval')
)
{
  # Validate the method choice up front (previously the first element was
  # taken without checking, so an invalid method silently indexed NULL):
  method <- match.arg(method)

  # Check if net is a (partial) correlation matrix:
  if (!isSymmetric(net)) stop("Matrix is not symmetric, cannot be a (partial) correlation matrix")
  # only.values = TRUE skips the (unused) eigenvectors:
  if (!all(eigen(net, only.values = TRUE)$values > 0)) stop("Matrix is not positive definite, cannot be a (partial) correlation matrix")

  vec <- net[upper.tri(net)]
  Res <- fdrtool(vec, "correlation", plot = FALSE, verbose = FALSE, cutoff.method = "locfdr")

  newnet <- net
  newnet[upper.tri(newnet)][Res[[method]] > cutoff] <- 0
  newnet[lower.tri(newnet)] <- t(newnet)[lower.tri(newnet)]
  newnet <- as.matrix(newnet)
  diag(newnet) <- 1
  return(newnet)
}

# Plot the "flow" of a network outward from one node: a tree layout rooted
# at `from`, with remaining (non-tree) edges drawn curved.
flow <- function(
    object, # qgraph object
    from, # Node of origin (index or label)
    horizontal = TRUE,
    equalize = TRUE,
    minCurve = 1,
    maxCurve = 4,
    unfadeFirst = FALSE,
    fade = TRUE,
    labels, # Same as qgraph
    ... # qgraph arguments
)
{
  # Test input:
  if (!is(object, "qgraph")) {
    # Fixed message typo ("runnin gqgraph"):
    warning("Input is not a qgraph object, running qgraph")
    object <- qgraph(object, ..., DoNotPlot = TRUE)
  }

  if (length(from) != 1) {
    stop("'from' must be of length 1")
  }

  if (missing(labels)) {
    labels <- object$graphAttributes$Nodes$labels
  }

  # Obtain edgelist:
  E <- as.data.frame(object$Edgelist)

  # If not fully connected, stop (number of zero Laplacian eigenvalues
  # equals the number of connected components):
  Adj <- 1 * (getWmat(object) != 0)
  diag(Adj) <- 0
  Laplacian <- diag(rowSums(Adj)) - Adj
  evLapl <- round(eigen(Laplacian)$values, 10)
  comps <- sum(evLapl == 0)
  if (comps > 1) {
    stop("Disconnected graph is not yet supported.")
  }

  # ID all edges:
  E$id <- seq_len(nrow(E))

  # From as character:
  if (is.character(from)) {
    if (missing(labels) || !is.character(labels)) stop("No labels supplied")
    from <- which(labels == from)
  }

  # Build the "vein" subgraph: recursively, outward from the origin node,
  # add edges that connect current nodes to nodes not yet in the vein graph.
  part1 <- E[E[, 1] == from, c("from", "to", "weight", "id")]
  part2 <- E[E[, 2] == from, c("to", "from", "weight", "id")]
  names(part2) <- c("from", "to", "weight", "id")
  VeinEdgelist <- rbind(part1, part2)

  repeat {
    # Currently in vein graph:
    currentNodes <- unique(c(unlist(VeinEdgelist[, 1:2])))

    # Edges connecting current nodes to new nodes:
    part1 <- E[E[, 1] %in% currentNodes & !E[, 2] %in% currentNodes, c("from", "to", "weight", "id")]
    part2 <- E[E[, 2] %in% currentNodes & !E[, 1] %in% currentNodes, c("to", "from", "weight", "id")]
    names(part2) <- c("from", "to", "weight", "id")
    Connected <- rbind(part1, part2)

    # If no new nodes are reachable, the vein graph is complete:
    if (nrow(Connected) == 0) {
      break
    }

    VeinEdgelist <- rbind(VeinEdgelist, Connected)
  }

  # Tree layout (Reingold-Tilford) rooted at the origin node:
  iG <- igraph::graph_from_edgelist(as.matrix(VeinEdgelist[, 1:2]), directed = FALSE)
  Layout <- igraph::layout_as_tree(iG, root = from, mode = "all")

  # Spread nodes evenly within each level if requested:
  if (equalize) {
    Layout[, 1] <- ave(Layout[, 1], Layout[, 2],
                       FUN = function(x) seq(0, 1, length = length(x) + 2)[-c(1, length(x) + 2)])
  }

  # Curvature proportional to the horizontal distance spanned by the edge:
  dists <- abs(Layout[, 1][E[, 1]] - Layout[, 1][E[, 2]])
  minDist <- min(dists[round(dists, 10) != 0])
  maxDist <- max(dists[dists != 0])
  curve <- ifelse(Layout[, 1][E[, 1]] == Layout[, 1][E[, 2]],
                  0,
                  minCurve + (dists - minDist) / (maxDist - minDist) * (maxCurve - minCurve))

  # Only edges within the same level are curved (sign gives direction):
  Curve <- ifelse(Layout[, 2][E[, 1]] == Layout[, 2][E[, 2]],
                  ifelse(Layout[, 1][E[, 1]] > Layout[, 1][E[, 2]], curve, -curve),
                  0)

  # Edge connection points: curved edges attach to the bottom of the node:
  ECP <- matrix(NA, nrow(E), nrow(E))
  ECP[Curve != 0, ] <- pi

  # Rotate the layout for a horizontal tree:
  if (horizontal) {
    Layout <- Layout[, 2:1]
    Layout[, 1] <- -Layout[, 1]
    ECP[Curve != 0, ] <- pi / 2
  }

  if (unfadeFirst) {
    fade <- ifelse(E[, 1] %in% from | E[, 2] %in% from, FALSE, NA)
  }

  # Plot:
  qgraph(as.matrix(E[, 1:3]),
         layout = Layout,
         curve = Curve,
         edgeConnectPoints = ECP,
         curveScale = FALSE,
         fade = fade,
         labels = labels,
         object,
         directed = object$Edgelist$directed,
         bidirectional = object$Edgelist$bidirectional,
         ...)
}

# (E)BIC-based Gaussian graphical model selection, optionally followed by
# stepwise add/remove search over single edges.
#
# S: sample covariance matrix; n: sample size; gamma: EBIC hyperparameter
# (0 = plain BIC); start: starting structure or a matrix for a manual start.
# Returns list(graph = partial correlation matrix, criterion = best value).
ggmModSelect <- function(
    S, # Sample covariance matrix
    n, # Sample size
    gamma = 0, # EBIC parameter, set to 0 for BIC selection
    start = c("glasso", "empty", "full"),
    stepwise = TRUE,
    considerPerStep = c("subset", "all"), # Subset only retests edges that previously improved EBIC; when none improve, all edges are tested again.
    verbose = TRUE,
    nCores = 1,
    checkPD = TRUE,
    criterion = "ebic",
    ... # EBICglasso arguments for starting point
)
{
  if (is.character(start)) {
    start <- match.arg(start)
  } else {
    # A matrix was supplied as manual starting structure:
    startMat <- start
    start <- "manual"
  }

  # Number of variables:
  nVar <- ncol(S)

  # Warning if there are many variables:
  if (nVar > 30 && stepwise && verbose) {
    message("'ggmModSelect' using stepwise = TRUE may be very slow in large graphs (> 30 nodes). Consider setting stepwise = FALSE")
  }

  if (checkPD) {
    # only.values = TRUE skips the (unused) eigenvectors:
    if (any(eigen(S, only.values = TRUE)$values < 0)) stop("'S' is not positive definite")
  }

  # Standardize cov matrix:
  S <- cov2cor(S)

  ### Starting graph ###
  if (start == "glasso") {
    if (verbose) message("Running glasso to obtain starting model...")
    # Run the glassopath:
    glassores <- EBICglassoCore(
      S = S, # Sample covariance matrix
      n = n, # Sample size
      gamma = gamma,
      refit = TRUE, # Take the structure and compute a non-penalized fit.
      ebicMethod = "new",
      regularized = FALSE,
      threshold = FALSE,
      verbose = FALSE,
      returnAllResults = TRUE,
      criterion = criterion)
    curGraph <- glassores$optnet
    curEBIC <- min(glassores$ebic)
  } else if (start == "empty") {
    curGraph <- matrix(0, nVar, nVar)
    fit <- ggmFit(curGraph, S, n, verbose = FALSE, ebicTuning = gamma)
    curEBIC <- fit$fitMeasures[[criterion]]
  } else if (start == "full") {
    curGraph <- corpcor::cor2pcor(cov2cor(S))
    fit <- ggmFit(curGraph, S, n, verbose = FALSE, ebicTuning = gamma)
    curEBIC <- fit$fitMeasures[[criterion]]
  } else if (start == "manual") {
    curGraph <- startMat
    fit <- ggmFit(startMat, S, n, verbose = FALSE, ebicTuning = gamma, refit = TRUE)
    curEBIC <- fit$fitMeasures[[criterion]]
  }

  # If not stepwise model search, stop here:
  if (!stepwise) {
    Results <- list(
      graph = curGraph,
      criterion = curEBIC
    )
    return(Results)
  }

  # Parallel:
  if (nCores > 1) {
    cl <- parallel::makePSOCKcluster(nCores - 1)
    # Export to cluster:
    parallel::clusterExport(cl, c("S", "n"), envir = environment())
  } else {
    cl <- NULL
  }

  # Edges to currently consider:
  curSkel <- curGraph != 0
  curEdges <- curSkel[upper.tri(curSkel)]
  curConsider <- rep(TRUE, length(curEdges))

  # Stepwise search: toggle single edges until no change improves the criterion.
  repeat {
    # Form all candidate skeletons (toggle one considered edge each):
    allGraphs <- lapply(which(curConsider), function(i) {
      testEdges <- curEdges
      testEdges[i] <- !curEdges[i]
      testSkel <- matrix(0, nVar, nVar)
      testSkel[upper.tri(testSkel)] <- 1 * testEdges
      testSkel[lower.tri(testSkel)] <- t(testSkel)[lower.tri(testSkel)]
      testSkel
    })

    if (all(curConsider)) {
      if (verbose) message("Testing all edges...")
    } else {
      if (verbose) message("Testing subset of edges...")
    }

    # Fit every candidate (non-regularized glasso with fixed zeroes):
    Results <- pblapply(allGraphs, function(G) {
      if (!all(G[upper.tri(G)] != 0)) {
        suppressWarnings(glassores_test <- glasso::glasso(S, 0,
                                                          zero = which(G == 0 & upper.tri(G), arr.ind = TRUE),
                                                          trace = 0, penalize.diagonal = FALSE, ...))
      } else {
        suppressWarnings(glassores_test <- glasso::glasso(S, 0, trace = 0, penalize.diagonal = FALSE, ...))
      }
      # Compute EBIC:
      fit <- ggmFit(invSigma = glassores_test$wi, covMat = S, sampleSize = n,
                    ebicTuning = gamma, refit = FALSE)
      return(list(
        graph = wi2net(glassores_test$wi),
        criterion = fit$fitMeasures[[criterion]]
      ))
    }, cl = cl)

    # All criterion values:
    EBICs <- sapply(Results, "[[", "criterion")

    # Test if any improvement:
    if (any(EBICs < curEBIC)) {
      # Update which edges to consider next round:
      curConsider[curConsider] <- EBICs < curEBIC

      # Find optimal network:
      optnet <- which.min(EBICs)
      curGraph <- Results[[optnet]]$graph
      curEBIC <- Results[[optnet]]$criterion
      # NOTE(review): 'optnet' indexes the subset of considered edges, but
      # here it is used as an index into the full 'curConsider' vector;
      # when curConsider is not all-TRUE these differ. Behavior kept as in
      # the original — confirm against upstream before changing.
      curConsider[optnet] <- FALSE
      if (verbose) message("Changed one edge...")

      # If nothing is left to consider, consider everything again:
      if (!any(curConsider)) {
        curConsider[] <- TRUE
      }
      curSkel <- curGraph != 0
      curEdges <- curSkel[upper.tri(curSkel)]
    } else {
      # If we were considering all edges, break:
      if (all(curConsider)) {
        break
      } else {
        # Else consider all edges again:
        curConsider[] <- TRUE
      }
    }
  }

  # stop cluster:
  if (nCores > 1) {
    parallel::stopCluster(cl)
  }

  if (!is.null(colnames(S))) {
    rownames(curGraph) <- colnames(curGraph) <- colnames(S)
  }

  return(list(
    graph = as.matrix(curGraph),
    criterion = curEBIC
  ))
}
Please use 'input'") } # Import qgraphObject$Arguments: if (length(qgraphObject$Arguments) > 0) qgraphObject$Arguments <- getArgs(qgraphObject$Arguments) # Import default arguments: def <- getOption("qgraph") if (!is.null(def$qgraph)) class(def$qgraph) <- "qgraph" if (any(sapply(def,function(x)!is.null(x)))) { qgraphObject$Arguments <- getArgs(c(qgraphObject$Arguments,def)) } # If qgraph object is used as input, recreate edgelist input: if (is(input,"qgraph")) { # if (is.null(qgraphObject$Arguments$directed)) qgraphObject$Arguments$directed <- input$Edgelist$directed # if (is.null(qgraphObject$Arguments$bidirectional)) qgraphObject$Arguments$bidirectional <- input$Edgelist$bidirectional # if (is.null(qgraphObject$Arguments$nNodes)) qgraphObject$Arguments$nNodes <- input$graphAttributes$Graph$nNodes # if (!is.null(qgraphObject$Arguments$input)){ input <- qgraphObject$Arguments$input } else { if(input[['graphAttributes']][['Graph']][['weighted']]) { input <- cbind(input$Edgelist$from,input$Edgelist$to,input$Edgelist$weight) } else { input <- cbind(input$Edgelist$from,input$Edgelist$to) } } # qgraphObject$Arguments$edgelist <- TRUE } ### PCALG AND GRAPHNEL ### if (is(input,"pcAlgo") | is(input,"graphNEL")) { if (is(input,"pcAlgo")) graphNEL <- input@graph else graphNEL <- input qgraphObject$Arguments$directed <- graphNEL@graphData$edgemode == "directed" qgraphObject$Arguments$bidirectional <- TRUE TempLabs <- graphNEL@nodes if (is.null(qgraphObject$Arguments$labels)) qgraphObject$Arguments$labels <- graphNEL@nodes weights <- sapply(graphNEL@edgeData@data,'[[','weight') EL <- laply(strsplit(names(weights),split="\\|"),'[',c(1,2)) # EL <- apply(EL,2,as.numeric) EL[,1] <- match(EL[,1],TempLabs) EL[,2] <- match(EL[,2],TempLabs) mode(EL) <- "numeric" # Create mixed graph if pcAlgo: if (is(input,"pcAlgo")) { srtInput <- aaply(EL,1,sort) qgraphObject$Arguments$directed <- !(duplicated(srtInput)|duplicated(srtInput,fromLast=TRUE)) rm(srtInput) } input <- EL rm(EL) if 
(any(weights!=1)) input <- cbind(input,weights) qgraphObject$Arguments$edgelist <- TRUE } ### bnlearn ### if (is(input,"bn")) { # browser() bnobject <- input input <- as.matrix(bnobject$arcs) TempLabs <- names(bnobject$nodes) if (is.null(qgraphObject$Arguments$labels)) qgraphObject$Arguments$labels <- TempLabs input[] <- as.numeric(match(c(input), TempLabs)) mode(input) <- "numeric" srtInput <- aaply(input,1,sort) input <- input[!duplicated(srtInput),] qgraphObject$Arguments$directed <- !(duplicated(srtInput)|duplicated(srtInput,fromLast=TRUE)) qgraphObject$Arguments$directed <- qgraphObject$Arguments$directed[!duplicated(srtInput)] qgraphObject$Arguments$edgelist <- TRUE } if (is(input,"bn.strength")) { bnobject <- input input <- as.matrix(bnobject[c("from","to","strength")]) TempLabs <- unique(c(bnobject$from,bnobject$to)) if (is.null(qgraphObject$Arguments$labels)) qgraphObject$Arguments$labels <- TempLabs input[,1:2] <- as.numeric(match(c(input[,1:2]), TempLabs)) input <- as.matrix(input) mode(input) <- "numeric" if (is.null(qgraphObject$Arguments$directed)) { if (is.null(bnobject$direction) || all(bnobject$direction %in% c(0,0.5))) { qgraphObject$Arguments$directed <- FALSE } else qgraphObject$Arguments$directed <- TRUE } if (!is.null(bnobject$direction)) { input[,3] <- input[,3] * ( 1 - qgraphObject$Arguments$directed * (1- bnobject$direction )) } # remove undirect duplicates: srt <- cbind( pmin(input[,1],input[,2]), pmax(input[,1],input[,2])) input <- input[!(duplicated(srt)&!qgraphObject$Arguments$directed), ] rm(srt) # srtInput <- aaply(input,1,sort) # input <- input[!duplicated(srtInput),] # qgraphObject$Arguments$directed <- !(duplicated(srtInput)|duplicated(srtInput,fromLast=TRUE)) # qgraphObject$Arguments$directed <- qgraphObject$Arguments$directed[!duplicated(srtInput)] qgraphObject$Arguments$directed <- TRUE qgraphObject$Arguments$probabilityEdges <- TRUE if (is.null( qgraphObject$Arguments$parallelEdge)) qgraphObject$Arguments$parallelEdge <- TRUE } 
### BDgraph #### if (is(input,"bdgraph")) { # browser() # stop("BDgraph support has temporarily been removed") if(is.null(qgraphObject$Arguments[['BDgraph']])){ BDgraph=c("phat","Khat") } else { BDgraph=qgraphObject$Arguments[['BDgraph']] } if (all(c("Khat","phat")%in%BDgraph)) layout(t(1:2)) if(is.null(qgraphObject$Arguments[['BDtitles']])) BDtitles <- TRUE else BDtitles <- qgraphObject$Arguments[['BDtitles']] Res <- list() if (isTRUE(which(BDgraph == "phat") < which(BDgraph == "Khat"))) { if(!requireNamespace("BDgraph")) stop("'BDgraph' package needs to be installed.") # phat: W <- as.matrix(BDgraph::plinks(input)) W <- W + t(W) Res[["phat"]] <- do.call(qgraph,c(list(input=W,probabilityEdges = TRUE),qgraphObject$Arguments)) L <- Res[["phat"]]$layout if (BDtitles) text(mean(par('usr')[1:2]),par("usr")[4] - (par("usr")[4] - par("usr")[3])/40,"Posterior probabilities", adj = c(0.5,1)) # Khat: W <- as.matrix(input$K_hat) # diag(W) <- -1*diag(W) # W <- - W / sqrt(diag(W)%o%diag(W)) W <- wi2net(W) Res[["Khat"]] <- do.call(qgraph,c(list(input = W,layout = L), qgraphObject$Arguments)) L <- Res[["Khat"]]$layout if (BDtitles) text(mean(par('usr')[1:2]),par("usr")[4] - (par("usr")[4] - par("usr")[3])/40,"Mean partial correlations", adj = c(0.5,1)) } else { if ("Khat" %in% BDgraph) { W <- as.matrix(input$K_hat) # diag(W) <- -1*diag(W) # W <- - W / sqrt(diag(W)%o%diag(W)) W <- wi2net(input$K_hat) Res[["Khat"]] <- do.call(qgraph,c(list(input=W),qgraphObject$Arguments)) L <- Res[["Khat"]]$layout if (BDtitles) text(mean(par('usr')[1:2]),par("usr")[4],"Mean partial correlations", adj = c(0.5,1)) } else L <- qgraphObject$Arguments$layout if ("phat" %in% BDgraph) { W <- as.matrix(BDgraph::plinks(input)) W <- W + t(W) Res[["phat"]] <- do.call(qgraph,c(list(input = W,layout = L,probabilityEdges= TRUE), qgraphObject$Arguments)) if (BDtitles) text(mean(par('usr')[1:2]),par("usr")[4],"Posterior probabilities", adj = c(0.5,1)) } } if (length(Res)==1) Res <- Res[[1]] return(Res) } ### 
GLASSO ### # glasso has no class but is a list with elements w, wi, loglik, errflag, approx, del and niter: if (is(input, "list") && all(c('w', 'wi', 'loglik','errflag', 'approx', 'del', 'niter' ) %in% names(input))) { input <- wi2net(input$wi) } ### Check arguments list: allArgs <- c("input", "layout", "groups", "minimum", "maximum", "cut", "details", "threshold", "palette", "theme", "graph", "threshold", "sampleSize", "tuning", "refit", "countDiagonal", "alpha", "bonf", "FDRcutoff", "mar", "filetype", "filename", "width", "height", "normalize", "res", "DoNotPlot", "plot", "rescale", "standAlone", "color", "vsize", "vsize2", "node.width", "node.height", "borders", "border.color", "border.width", "shape", "polygonList", "vTrans", "subplots", "subpars", "subplotbg", "images", "noPar", "pastel", "rainbowStart", "usePCH", "node.resolution", "title", "preExpression", "postExpression", "diag", "labels", "label.cex", "label.color", "label.prop", "label.norm", "label.scale", "label.scale.equal", "label.font", "label.fill.vertical", "label.fill.horizontal", "esize", "edge.width", "edge.color", "posCol", "negCol", "unCol", "probCol", "negDashed", "probabilityEdges", "colFactor", "trans", "fade", "loop", "lty", "edgeConnectPoints", "curve", "curveAll", "curveDefault", "curveShape", "curveScale", "curveScaleNodeCorrection", "curvePivot", "curvePivotShape", "parallelEdge", "parallelAngle", "parallelAngleDefault", "edge.labels", "edge.label.cex", "edge.label.bg", "edge.label.position", "edge.label.font", "edge.label.color", "repulsion", "layout.par", "layout.control", "aspect", "rotation", "legend", "legend.cex", "legend.mode", "GLratio", "layoutScale", "layoutOffset", "nodeNames", "bg", "bgcontrol", "bgres", "pty", "gray", "font", "directed", "arrows", "arrowAngle", "asize", "open", "bidirectional", "mode", "alpha", "sigScale", "bonf", "scores", "scores.range", "mode", "edge.color", "knots", "knot.size", "knot.color", "knot.borders", "knot.border.color", "knot.border.width", 
"means", "SDs", "meanRange", "bars", "barSide", "barColor", "barLength", "barsAtSide", "pie", "pieBorder", "pieColor", "pieColor2", "pieStart", "pieDarken", "piePastel", "BDgraph", "BDtitles", "edgelist", "weighted", "nNodes", "XKCD", "Edgelist", "Arguments", "plotOptions", "graphAttributes", "layout", "layout.orig","resid","factorCors","residSize","filetype","model", "crossloadings","gamma","lambda.min.ratio","loopRotation","edgeConnectPoints","residuals","residScale","residEdge","CircleEdgeEnd","title.cex", "node.label.offset", "node.label.position", "pieCImid", "pieCIlower", "pieCIupper", "pieCIpointcex", "pieCIpointcol", "edge.label.margin") if (any(!names(qgraphObject$Arguments) %in% allArgs)){ wrongArgs <- names(qgraphObject$Arguments)[!names(qgraphObject$Arguments) %in% allArgs] warning(paste0("The following arguments are not documented and likely not arguments of qgraph and thus ignored: ",paste(wrongArgs,collapse = "; "))) } ## Extract arguments if(is.null(qgraphObject$Arguments[['verbose']])) { verbose <- FALSE } else verbose <- qgraphObject$Arguments[['verbose']] if(is.null(qgraphObject$Arguments[['tuning']])) { tuning <- 0.5 } else tuning <- qgraphObject$Arguments[['tuning']] if(!is.null(qgraphObject$Arguments[['gamma']])) { tuning <- qgraphObject$Arguments[['gamma']] } if(is.null(qgraphObject$Arguments[['lambda.min.ratio']])) { lambda.min.ratio <- 0.01 } else lambda.min.ratio <- qgraphObject$Arguments[['lambda.min.ratio']] # Refit: if(is.null(qgraphObject$Arguments[['refit']])) { refit <- FALSE } else refit <- qgraphObject$Arguments[['refit']] if(is.null(qgraphObject$Arguments[['FDRcutoff']])) { FDRcutoff <- 0.9 } else FDRcutoff <- qgraphObject$Arguments[['FDRcutoff']] ### HUGE (select via EBIC): if (is(input,"huge")) { if (input$method != "glasso") stop("Only 'glasso' method is supported") if(!requireNamespace("huge")) stop("'huge' package needs to be installed.") input <- huge::huge.select(input, "ebic", ebic.gamma = tuning) } ### HUGE select ### if 
(is(input,"select")) { if (input$method != "glasso") stop("Only 'glasso' method is supported") input <- wi2net(forceSymmetric(input$opt.icov)) } # Coerce input to matrix: input <- as.matrix(input) # Set mode: sigSign <- FALSE if(is.null(qgraphObject$Arguments[['graph']])) graph <- "default" else graph=qgraphObject$Arguments[['graph']] if (graph == "fdr") { graph <- "fdr.cor" } if (graph == "EBICglasso"){ graph <- "glasso" } if (graph == "ggmModSelect"){ graph <- "ggmModSelect" } if (!graph %in% c("default","cor","pcor","glasso","ggmModSelect","factorial")){ stop("'graph' argument must be one of 'default', 'cor', 'pcor', 'glasso', 'ggmModSelect', or 'factorial'") } # Reset graph for replotting: qgraphObject$Arguments[['graph']] <- NULL if (graph %in% c("sig2","significance2")) { graph <- "sig" sigSign <- TRUE } if (graph %in% c("sig","significance")) { # if (!require("fdrtool")) stop("`fdrtool' package not found, is it installed?") qgraphObject$Arguments[['mode']] <- "sig" } ### SIGNIFICANCE GRAPH ARGUMENTS ### if(is.null(qgraphObject$Arguments[['mode']])) mode <- "strength" else mode <- qgraphObject$Arguments[['mode']] if(is.null(qgraphObject$Arguments$sigScale)) sigScale <- function(x)0.7*(1-x)^(log(0.4/0.7,1-0.05)) else sigScale <- qgraphObject$Arguments$sigScale if (!mode%in%c("strength","sig","direct")) stop("Mode must be 'direct', 'sig' or 'strength'") if(is.null(qgraphObject$Arguments$bonf)) bonf=FALSE else bonf=qgraphObject$Arguments$bonf if(is.null(qgraphObject$Arguments$OmitInsig)) OmitInsig=FALSE else OmitInsig <- qgraphObject$Arguments$OmitInsig if(is.null(qgraphObject$Arguments[['alpha']])) { if (mode != "sig") { alpha <- 0.05 } else alpha <- c(0.0001,0.001,0.01,0.05) } else alpha <- qgraphObject$Arguments[['alpha']] if (length(alpha) > 4) stop("`alpha' can not have length > 4") ##### # Settings for the edgelist if(is.null(qgraphObject$Arguments$edgelist)) { if (nrow(input)!=ncol(input)) { # Check if it is an edgelist or break: if (ncol(input) %in% 
c(2,3) && ((is.character(input[,1]) || is.factor(input[,1])) || all(input[,1] %% 1 == 0)) && ((is.character(input[,2]) || is.factor(input[,2])) || all(input[,2] %% 1 == 0))){ edgelist <- TRUE } else { stop("Input is not a weights matrix or an edgelist.") } } else edgelist <- FALSE } else edgelist=qgraphObject$Arguments$edgelist if(is.null(qgraphObject$Arguments[['edgeConnectPoints']])) edgeConnectPoints <- NULL else edgeConnectPoints <- qgraphObject$Arguments[['edgeConnectPoints']] if(is.null(qgraphObject$Arguments[['label.color.split']])) label.color.split <- 0.25 else label.color.split <- qgraphObject$Arguments[['label.color.split']] if(is.null(qgraphObject$Arguments$labels)) { labels <- TRUE if (!edgelist && !is.null(colnames(input))) { # if (nrow(input) <= 20 & all(colnames(input)==rownames(input))) # { labels <- abbreviate(colnames(input),3) if (any(is.na(labels))){ warning("Some labels where not abbreviatable.") labels <- ifelse(is.na(labels), colnames(input), labels) } # } } } else labels <- qgraphObject$Arguments$labels if (edgelist) { if (is.character(input)) { if(!is.logical(labels)) allNodes <- labels else allNodes <- unique(c(input[,1:2])) input[,1:2] <- match(input[,1:2],allNodes) input <- as.matrix(input) mode(input) <- "numeric" if (is.logical(labels) && labels) labels <- allNodes } } if(is.null(qgraphObject$Arguments$nNodes)) { if (edgelist) { if (!is.logical(labels)) nNodes <- length(labels) else nNodes <- max(c(input[,1:2])) } else nNodes=nrow(input) } else nNodes=qgraphObject$Arguments$nNodes ##### #### Arguments for pies with Jonas # Arguments for pies: if(is.null(qgraphObject$Arguments[['pieRadius']])){ pieRadius <- 1 } else { pieRadius <- qgraphObject$Arguments[['pieRadius']] } if(is.null(qgraphObject$Arguments[['pieBorder']])){ pieBorder <- .15 if (any(pieBorder < 0 | pieBorder > 1)){ stop("Values in the 'pieBorder' argument must be within [0,1]") } } else { pieBorder <- qgraphObject$Arguments[['pieBorder']] } 
if(is.null(qgraphObject$Arguments[['pieStart']])){ pieStart <- 0 if (any(pieStart < 0 | pieStart > 1)){ stop("Values in the 'pieStart' argument must be within [0,1]") } } else { pieStart <- qgraphObject$Arguments[['pieStart']] } if(is.null(qgraphObject$Arguments[['pieDarken']])){ pieDarken <- 0.25 if (any(pieDarken < 0 | pieDarken > 1)){ stop("Values in the 'pieDarken' argument must be within [0,1]") } } else { pieDarken <- qgraphObject$Arguments[['pieDarken']] } if(is.null(qgraphObject$Arguments[['pieColor']])){ # pieColor <- 'grey' pieColor <- NA } else { pieColor <- qgraphObject$Arguments[['pieColor']] } if(is.null(qgraphObject$Arguments[['pieColor2']])){ pieColor2 <- 'white' } else { pieColor2 <- qgraphObject$Arguments[['pieColor2']] } # Make arguments vectorized: if (length(pieColor) == 1){ pieColor <- rep(pieColor,length=nNodes) } if (length(pieColor) != nNodes){ stop("Length of 'pieColor' argument must be 1 or number of nodes") } if (length(pieColor2) == 1){ pieColor2 <- rep(pieColor2,length=nNodes) } if (length(pieColor2) != nNodes){ stop("Length of 'pieColor2' argument must be 1 or number of nodes") } if (length(pieBorder) == 1){ pieBorder <- rep(pieBorder,length=nNodes) } if (length(pieBorder) != nNodes){ stop("Length of 'pieBorder' argument must be 1 or number of nodes") } if (length(pieStart) == 1){ pieStart <- rep(pieStart,length=nNodes) } if (length(pieStart) != nNodes){ stop("Length of 'pieStart' argument must be 1 or number of nodes") } if (length(pieDarken) == 1){ pieDarken <- rep(pieDarken,length=nNodes) } if (length(pieDarken) != nNodes){ stop("Length of 'pieDarken' argument must be 1 or number of nodes") } if(is.null(qgraphObject$Arguments[['pie']])){ drawPies <- FALSE pie <- NULL } else { # Obtain pie values: pie <- qgraphObject$Arguments[['pie']] # Check values: if (length(pie) != nNodes){ stop("Length of 'pie' argument must be equal to number of nodes.") } # if (any(pie < 0 | pie > 1)){ # stop("Values in the 'pie' argument must be within 
[0,1]") # } # Dummy subplots (to be filed later) # subplots <- vector("list", nNodes) # Overwrite subplotbg to NA: # subplotbg <- NA # Overwrite borders to FALSE: # borders <- FALSE # Overwrite shape to circle: # shape <- "circle" # Logical: drawPies <- TRUE } # Pie CI: # Pie CI args: # "pieCIlower", "pieCIupper", "pieCIpointcex", "pieCIpointcol" pieCIs <- FALSE # Check if pieCIs are drawn: if(!is.null(qgraphObject$Arguments[['pieCIlower']])){ pieCIs <- TRUE pieCIlower <- qgraphObject$Arguments[['pieCIlower']] if(is.null(qgraphObject$Arguments[['pieCIupper']])){ pieCIupper <- 1 } } if(!is.null(qgraphObject$Arguments[['pieCIupper']])){ pieCIs <- TRUE pieCIupper <- qgraphObject$Arguments[['pieCIupper']] if(is.null(qgraphObject$Arguments[['pieCIlower']])){ pieCIlower <- 0 } } # Set up the pieCIs: if (isTRUE(pieCIs)){ drawPies <- TRUE if (!is.null(qgraphObject$Arguments[['pieCImid']])){ pieCImid <- qgraphObject$Arguments[['pieCImid']] } else stop("'pieCImid' may not be missing when pieCIs are used") # Vectorize: pieCIlower <- rep(pieCIlower,length=nNodes) pieCIupper <- rep(pieCIupper,length=nNodes) pieCImid <- rep(pieCImid, length=nNodes) # Check mid: if (any(pieCIlower > pieCImid | pieCIupper < pieCImid)){ stop("'pieCImid' is not between 'pieCIlower' and 'pieCIupper'") } # Check bounds: if (any(pieCIlower < 0) | any(pieCImid > 1)){ stop("pieCI range should be between 0 and 1") } # If pie argument is used, give an error: if(!is.null(qgraphObject$Arguments[['pie']])){ stop("'pie' argument cannot be used in combination with pieCIs") } # get the size of the point: # "pieCIlower", "pieCIupper", "pieCIpointcex", "pieCIpointcol" if(!is.null(qgraphObject$Arguments[['pieCIpointcex']])){ pieCIpointcex <- qgraphObject$Arguments[['pieCIpointcex']] } else { pieCIpointcex <- 0.01 } pieCIpointcex <- rep(pieCIpointcex, length=nNodes) if(!is.null(qgraphObject$Arguments[['pieCIpointcol']])){ pieCIpointcol <- qgraphObject$Arguments[['pieCIpointcol']] } else { pieCIpointcol <- "black" } 
pieCIpointcol <- rep(pieCIpointcol, length = nNodes) # Now form the pie argument: pieTab <- cbind( 0, pieCIlower, pmax(pieCIlower, pieCImid - (pieCIpointcex/2)), pmin(pieCIupper, pieCImid + (pieCIpointcex/2)), pieCIupper, 1) pie <- list() pieColor2 <- list() for (i in seq_len(nrow(pieTab))){ pie[[i]] <- diff(pieTab[i,]) pieColor2[[i]] <- c("white",pieColor[i],pieCIpointcol[i],pieColor[i],"white") } pieColor <- pieColor2 } ##### if (is.expression(labels)) labels <- as.list(labels) if(is.null(qgraphObject$Arguments[['background']])) background <- NULL else background <- qgraphObject$Arguments[['background']] if(is.null(qgraphObject$Arguments[['label.prop']])){ label.prop <- 0.9*(1-ifelse(pieBorder < 0.5,pieBorder,0)) } else { label.prop <- qgraphObject$Arguments[['label.prop']] } if(is.null(qgraphObject$Arguments[['label.norm']])) label.norm <- "OOO" else label.norm <- qgraphObject$Arguments[['label.norm']] # if(is.null(qgraphObject$Arguments[['label.cex']])) label.cex <- NULL else label.cex <- qgraphObject$Arguments[['label.cex']] # if(is.null(qgraphObject$Arguments[['nodeNames']])) nodeNames <- NULL else nodeNames <- qgraphObject$Arguments[['nodeNames']] if(is.null(qgraphObject$Arguments[['subplots']])) { # if (!drawPies){ subplots <- NULL # } } else { # if (drawPies){ # warning("'subplots' argument ignored if 'pie' argument is used.") # } else { subplots <- qgraphObject$Arguments[['subplots']] # } } if(is.null(qgraphObject$Arguments[['subpars']])) subpars <- list(mar=c(0,0,0,0)) else subpars <- qgraphObject$Arguments[['subpars']] if(is.null(qgraphObject$Arguments[['subplotbg']])) { # if (!drawPies){ subplotbg <- NULL # } } else { # if (drawPies){ # warning("'subplotbg' argument ignored if 'pie' argument is used.") # } else { subplotbg <- qgraphObject$Arguments[['subplotbg']] # } } if(is.null(qgraphObject$Arguments[['images']])) images <- NULL else images <- qgraphObject$Arguments[['images']] if(is.null(qgraphObject$Arguments[['noPar']])) noPar <- FALSE else noPar 
<- qgraphObject$Arguments[['noPar']] # Knots: if(is.null(qgraphObject$Arguments[['knots']])) knots <- list() else knots <- qgraphObject$Arguments[['knots']] if(is.null(qgraphObject$Arguments[['knot.size']])) knot.size <- 1 else knot.size <- qgraphObject$Arguments[['knot.size']] if(is.null(qgraphObject$Arguments[['knot.color']])) knot.color <- NA else knot.color <- qgraphObject$Arguments[['knot.color']] if(is.null(qgraphObject$Arguments[['knot.borders']])) knot.borders <- FALSE else knot.borders <- qgraphObject$Arguments[['knot.borders']] if(is.null(qgraphObject$Arguments[['knot.border.color']])) knot.border.color <- "black" else knot.border.color <- qgraphObject$Arguments[['knot.border.color']] if(is.null(qgraphObject$Arguments[['knot.border.width']])) knot.border.width <- 1 else knot.border.width <- qgraphObject$Arguments[['knot.border.width']] ##### if(is.null(qgraphObject$Arguments$shape)) { # if (!drawPies){ shape <- rep("circle",nNodes) if (!is.null(subplots)) { # Get which nodes become a subplot: whichsub <- which(sapply(subplots,function(x)is.expression(x)|is.function(x))) shape[whichsub][!shape[whichsub]%in%c("square","rectangle")] <- "square" } # } } else { # if (drawPies){ # warning("'shape' argument ignored if 'pie' argument is used.") # } else { shape <- qgraphObject$Arguments[['shape']] # } } if(is.null(qgraphObject$Arguments[['usePCH']])) { if (nNodes > 50 && !drawPies) usePCH <- TRUE else usePCH <- NULL } else usePCH <- qgraphObject$Arguments[['usePCH']] if(is.null(qgraphObject$Arguments[['node.resolution']])) node.resolution <- 100 else node.resolution <- qgraphObject$Arguments[['node.resolution']] # Default for fact cut and groups if (graph=="factorial") fact=TRUE else fact=FALSE if (fact & edgelist) stop('Factorial graph needs a correlation matrix') # if (graph=="concentration") partial=TRUE else partial=FALSE # if(is.null(qgraphObject$Arguments$cutQuantile)) cutQuantile <- 0.9 else cutQuantile <- qgraphObject$Arguments$cutQuantile defineCut <- 
FALSE if(is.null(qgraphObject$Arguments[['cut']])) { cut=0 # if (nNodes<50) if (nNodes>=20 | fact) { cut=0.3 defineCut <- TRUE } if (mode=="sig") cut <- ifelse(length(alpha)>1,sigScale(alpha[length(alpha)-1]),sigScale(alpha[length(alpha)])) } else if (mode != "sig") cut <- ifelse(is.na(qgraphObject$Arguments[['cut']]),0,qgraphObject$Arguments[['cut']]) else cut <- ifelse(length(alpha)>1,sigScale(alpha[length(alpha)-1]),sigScale(alpha[length(alpha)])) if(is.null(qgraphObject$Arguments$groups)) groups=NULL else groups=qgraphObject$Arguments$groups if (is.factor(groups) | is.character(groups)) groups <- tapply(1:length(groups),groups,function(x)x) # Factorial graph: if(is.null(qgraphObject$Arguments$nfact)) { nfact=NULL } else nfact=qgraphObject$Arguments$nfact if (fact) { if (is.null(nfact)) { if (is.null(groups)) nfact=sum(eigen(input)$values>1) else nfact=length(groups) } loadings=loadings(factanal(factors=nfact,covmat=input,rotation="promax")) loadings=loadings[1:nrow(loadings),1:ncol(loadings)] loadings[loadings=cut]=1 input=(loadings%*%t(loadings)>0)*1 diag(input)=0 } # Glasso arguments: if(is.null(qgraphObject$Arguments[['sampleSize']])) { sampleSize <- NULL } else sampleSize <- qgraphObject$Arguments[['sampleSize']] if(is.null(qgraphObject$Arguments[['countDiagonal']])) { countDiagonal <- FALSE } else countDiagonal <- qgraphObject$Arguments[['countDiagonal']] # SET DEFAULT qgraphObject$Arguments: # General qgraphObject$Arguments: if(is.null(qgraphObject$Arguments$DoNotPlot)) DoNotPlot=FALSE else DoNotPlot=qgraphObject$Arguments$DoNotPlot if(is.null(qgraphObject$Arguments[['layout']])) layout=NULL else layout=qgraphObject$Arguments[['layout']] if(is.null(qgraphObject$Arguments$maximum)) maximum=0 else maximum=qgraphObject$Arguments$maximum if(is.null(qgraphObject$Arguments$minimum)) { # if (nNodes<50) minimum=0 # if (nNodes>=50) minimum=0.1 minimum <- 0 if (mode=="sig") minimum <- ifelse(length(alpha)>1,sigScale(alpha[length(alpha)]),0) } else { if 
(mode!="sig") minimum=qgraphObject$Arguments$minimum else minimum <- ifelse(length(alpha)>1,sigScale(alpha[length(alpha)]),0) if (is.character(minimum)) { if (grepl("sig",minimum,ignore.case = TRUE)) { if (is.null(sampleSize)) { stop("'sampleSize' argument must be assigned to use significance as minimum") } if (graph == "default") { warning("'graph' argument did not specify type of graph. Assuming correlation graph (graph = 'cor')") graph <- "cor" } if (graph %in% c("cor","pcor")) { # Find threshold for significance! # difference between cor and pcor is in df: if (graph == "cor") { df <- sampleSize - 2 } else { df <- sampleSize - 2 - (nNodes - 2) } siglevel <- max(alpha)/2 if (bonf) { siglevel <- siglevel / (nNodes*(nNodes-1)/2) } t <- abs(qt(siglevel, df, lower.tail=TRUE)) minimum <- t/sqrt(t^2+df) } else stop("minimum = 'sig' is not supported with this 'graph' argument") } else stop("Minimum is specified a string which is not 'sig'.") } } if (minimum < 0) { warning("'minimum' set to absolute value") minimum <- abs(minimum) } # Threshold argument removes edges from network: if(is.null(qgraphObject$Arguments[['threshold']])) { threshold <- 0 } else { threshold <- qgraphObject$Arguments[['threshold']] } if(is.null(qgraphObject$Arguments$weighted)) weighted=NULL else weighted=qgraphObject$Arguments$weighted if(is.null(qgraphObject$Arguments$rescale)) rescale=TRUE else rescale=qgraphObject$Arguments$rescale if(is.null(qgraphObject$Arguments[['edge.labels']])) edge.labels=FALSE else edge.labels=qgraphObject$Arguments[['edge.labels']] if(is.null(qgraphObject$Arguments[['edge.label.bg']])) edge.label.bg=TRUE else edge.label.bg=qgraphObject$Arguments[['edge.label.bg']] if (identical(FALSE,edge.label.bg)) plotELBG <- FALSE else plotELBG <- TRUE if(is.null(qgraphObject$Arguments[['edge.label.margin']])) edge.label.margin=0 else edge.label.margin=qgraphObject$Arguments[['edge.label.margin']] ### Themes ### # Default theme: posCol <- c("#009900","darkgreen") negCol <- 
c("#BF0000","red") bcolor <- NULL bg <- FALSE negDashed <- FALSE parallelEdge <- FALSE fade <- NA border.width <- 1 font <- 1 unCol <- "#808080" # if (length(groups) < 8){ # palette <- "colorblind" # } else { palette <- "rainbow" # } if(!is.null(qgraphObject$Arguments[['theme']])){ theme <- qgraphObject$Arguments[['theme']] if (length(theme) > 1) stop("'theme' must be of lenght 1") if (!theme %in% c("classic","Hollywood","Leuven","Reddit","TeamFortress","Fried", "Borkulo","colorblind","gray","gimme","GIMME","neon","pride")){ stop(paste0("Theme '",theme,"' is not supported.")) } # Themes: if (theme == "classic"){ posCol <- c("#009900","darkgreen") negCol <- c("#BF0000","red") } else if (theme == "Leuven"){ dots <- list(...) dots$DoNotPlot <- TRUE dots$theme <- "classic" dots$input <- input return(getWmat(do.call(qgraph,dots ))) } else if (theme == "Hollywood"){ negCol <- "#FFA500" posCol <- "#005AFF" } else if (theme == "Reddit"){ posCol <- "#CCCCFF" negCol <- "#FF4500" } else if (theme == "TeamFortress"){ negCol <- "#B8383B" posCol <- "#5885A2" } else if (theme == "Fried"){ posCol <- "black" negCol <- "black" bg <- "gray" palette <- "gray" } else if (theme == "Borkulo"){ posCol <- "darkblue" negCol <- "red" bcolor <- "darkblue" } else if (theme == "colorblind"){ posCol <- c("#0000D5","darkblue") negCol <- c("#BF0000","red") palette <- "colorblind" } else if (theme == "gray" | theme == "grey"){ posCol <- negCol <- c("gray10","black") palette <- "gray" negDashed <- TRUE } else if (theme == "gimme" | theme == "GIMME"){ posCol <- "red" negCol <- "blue" parallelEdge <- TRUE fade <- FALSE } else if(theme == "neon"){ bg <- "black" label.color <- "#8ffcff" bcolor <- "#8ffcff" border.width <- 4 font <- 2 posCol <- "#f3ea5f" negCol <- "#c04df9" unCol <- "#8ffcff" palette <- "neon" }else if(theme == "pride"){ posCol <- "#1AB3FF" negCol <- "#FF1C8D" unCol <- "#613915" palette <- "pride" } } # Overwrite: if(!is.null(qgraphObject$Arguments[['parallelEdge']])) parallelEdge <- 
qgraphObject$Arguments[['parallelEdge']] if(!is.null(qgraphObject$Arguments[['fade']])) fade <- qgraphObject$Arguments[['fade']] if(!is.null(qgraphObject$Arguments[['negDashed']])) negDashed <- qgraphObject$Arguments[['negDashed']] if(!is.null(qgraphObject$Arguments[['posCol']])) posCol <- qgraphObject$Arguments[['posCol']] if(!is.null(qgraphObject$Arguments[['negCol']])) negCol <- qgraphObject$Arguments[['negCol']] if(!is.null(qgraphObject$Arguments[['border.width']])) border.width <- qgraphObject$Arguments[['border.width']] if(!is.null(qgraphObject$Arguments[['font']])) font <- qgraphObject$Arguments[['font']] if(is.null(qgraphObject$Arguments[['edge.label.font']])) edge.label.font=font else edge.label.font=qgraphObject$Arguments[['edge.label.font']] if(is.null(qgraphObject$Arguments[['label.font']])) label.font <- font else label.font <- qgraphObject$Arguments[['label.font']] if(!is.null(qgraphObject$Arguments[['unCol']])) unCol <- qgraphObject$Arguments[['unCol']] if(is.null(qgraphObject$Arguments[['probCol']])) probCol <- "black" else probCol <- qgraphObject$Arguments[['probCol']] if(!is.null(qgraphObject$Arguments[['probabilityEdges']])) { if (isTRUE(qgraphObject$Arguments[['probabilityEdges']])) { posCol <- probCol } } if (length(posCol)==1) posCol <- rep(posCol,2) if (length(posCol)!=2) stop("'posCol' must be of length 1 or 2.") if (length(negCol)==1) negCol <- rep(negCol,2) if (length(negCol)!=2) stop("'negCol' must be of length 1 or 2.") # border color: if(!is.null(qgraphObject$Arguments[['border.color']])) { bcolor <- qgraphObject$Arguments[['border.color']] } # Alias? 
if(!is.null(qgraphObject$Arguments[['border.colors']])) { bcolor <- qgraphObject$Arguments[['border.colors']] } # BG: if(!is.null(qgraphObject$Arguments$bg)) bg <- qgraphObject$Arguments$bg # Palette: # PALETTE either one of the defaults or a function if(!is.null(qgraphObject$Arguments[['palette']])){ palette <- qgraphObject$Arguments[['palette']] } # Check palette: if (!is.function(palette)){ if (length(palette) != 1 && !is.character(palette)){ stop("'palette' must be a single string.") } if (!palette %in% c("rainbow","colorblind","R","ggplot2","gray","grey","pastel","neon","pride")){ stop(paste0("Palette '",palette,"' is not supported.")) } } ### if(is.null(qgraphObject$Arguments[['colFactor']])) colFactor <- 1 else colFactor <- qgraphObject$Arguments[['colFactor']] if(is.null(qgraphObject$Arguments[['edge.color']])) edge.color <- NULL else edge.color=qgraphObject$Arguments[['edge.color']] if(is.null(qgraphObject$Arguments[['edge.label.cex']])) edge.label.cex=1 else edge.label.cex=qgraphObject$Arguments[['edge.label.cex']] if(is.null(qgraphObject$Arguments[['edge.label.position']])) edge.label.position <- 0.5 else edge.label.position=qgraphObject$Arguments[['edge.label.position']] if(is.null(qgraphObject$Arguments$directed)) { if (edgelist) directed=TRUE else directed=NULL } else directed=qgraphObject$Arguments$directed if(is.null(qgraphObject$Arguments[['legend']])) { if ((!is.null(groups) & !is.null(names(groups))) | !is.null(nodeNames)) legend <- TRUE else legend <- FALSE } else legend <- qgraphObject$Arguments[['legend']] stopifnot(is.logical(legend)) # if (is.null(groups)) legend <- FALSE if(is.null(qgraphObject$Arguments$plot)) plot=TRUE else plot=qgraphObject$Arguments$plot if(is.null(qgraphObject$Arguments$rotation)) rotation=NULL else rotation=qgraphObject$Arguments$rotation if(is.null(qgraphObject$Arguments[['layout.control']])) layout.control=0.5 else layout.control=qgraphObject$Arguments[['layout.control']] # repulsion controls the repulse.rad 
argument if(is.null(qgraphObject$Arguments[['repulsion']])) repulsion=1 else repulsion=qgraphObject$Arguments[['repulsion']] if(is.null(qgraphObject$Arguments[['layout.par']])) { if (is.null(layout) || identical(layout,"spring")) layout.par <- list(repulse.rad = nNodes^(repulsion * 3)) else layout.par <- list() } else layout.par=qgraphObject$Arguments[['layout.par']] if(is.null(qgraphObject$Arguments[['layoutRound']])){ layoutRound <- TRUE } else { layoutRound <- qgraphObject$Arguments[['layoutRound']] } layout.par$round <- layoutRound if(is.null(qgraphObject$Arguments$details)) details=FALSE else details=qgraphObject$Arguments$details if(is.null(qgraphObject$Arguments$title)) title <- NULL else title <- qgraphObject$Arguments$title if(is.null(qgraphObject$Arguments[['title.cex']])) title.cex <- NULL else title.cex <- qgraphObject$Arguments[['title.cex']] if(is.null(qgraphObject$Arguments$preExpression)) preExpression <- NULL else preExpression <- qgraphObject$Arguments$preExpression if(is.null(qgraphObject$Arguments$postExpression)) postExpression <- NULL else postExpression <- qgraphObject$Arguments$postExpression # Output qgraphObject$Arguments: if(is.null(qgraphObject$Arguments[['edge.label.color']])) ELcolor <- NULL else ELcolor <- qgraphObject$Arguments[['edge.label.color']] # if(is.null(qgraphObject$Arguments[['border.width']])) border.width <- 1 else border.width <- qgraphObject$Arguments[['border.width']] #if (!DoNotPlot & !is.null(dev.list()[dev.cur()])) #{ # par(mar=c(0,0,0,0), bg=background) # if (plot) # { # plot(1, ann = FALSE, axes = FALSE, xlim = c(-1.2, 1.2), ylim = c(-1.2 ,1.2),type = "n", xaxs = "i", yaxs = "i") # plot <- FALSE # } #} PlotOpen <- !is.null(dev.list()[dev.cur()]) if(is.null(qgraphObject$Arguments$filetype)) filetype="default" else filetype=qgraphObject$Arguments$filetype if(is.null(qgraphObject$Arguments$filename)) filename="qgraph" else filename=qgraphObject$Arguments$filename if(is.null(qgraphObject$Arguments$width)) width <- 7 
else width <- qgraphObject$Arguments[['width']] if(is.null(qgraphObject$Arguments$height)) height <- 7 else height <- qgraphObject$Arguments[['height']] if(is.null(qgraphObject$Arguments$pty)) pty='m' else pty=qgraphObject$Arguments$pty if(is.null(qgraphObject$Arguments$res)) res=320 else res=qgraphObject$Arguments$res if(is.null(qgraphObject$Arguments[['normalize']])) normalize <- TRUE else normalize <- qgraphObject$Arguments[['normalize']] # Graphical qgraphObject$Arguments # defNodeSize <- max((-1/72)*(nNodes)+5.35,1) ### Default node size, used as standard unit. if(is.null(qgraphObject$Arguments[['mar']])) mar <- c(3,3,3,3)/10 else mar <- qgraphObject$Arguments[["mar"]]/10 if(is.null(qgraphObject$Arguments[['vsize']])) { vsize <- 8*exp(-nNodes/80)+1 # vsize <- max((-1/72)*(nNodes)+5.35,1) if(is.null(qgraphObject$Arguments[['vsize2']])) vsize2 <- vsize else vsize2 <- vsize * qgraphObject$Arguments[['vsize2']] } else { vsize <- qgraphObject$Arguments[['vsize']] if(is.null(qgraphObject$Arguments[['vsize2']])) vsize2 <- vsize else vsize2 <- qgraphObject$Arguments[['vsize2']] } if(!is.null(qgraphObject$Arguments[['node.width']])) { vsize <- vsize * qgraphObject$Arguments[['node.width']] } if(!is.null(qgraphObject$Arguments[['node.height']])) { vsize2 <- vsize2 * qgraphObject$Arguments[['node.height']] } if(is.null(qgraphObject$Arguments$color)) color=NULL else color=qgraphObject$Arguments$color if(is.null(qgraphObject$Arguments[['gray']])) gray <- FALSE else gray <- qgraphObject$Arguments[['gray']] if (gray) { posCol <- negCol <- c("gray10","black") warning("The 'gray' argument is deprecated, please use theme = 'gray' instead.") } if(is.null(qgraphObject$Arguments[['pastel']])){ pastel <- FALSE } else { warning("The 'pastel' argument is deprecated, please use palette = 'pastel' instead.") palette <- "pastel" pastel <- qgraphObject$Arguments[['pastel']] } if(is.null(qgraphObject$Arguments[['piePastel']])) piePastel <- FALSE else piePastel <- 
qgraphObject$Arguments[['piePastel']] if(is.null(qgraphObject$Arguments[['rainbowStart']])) rainbowStart <- 0 else rainbowStart <- qgraphObject$Arguments[['rainbowStart']] if(is.null(qgraphObject$Arguments$bgcontrol)) bgcontrol=6 else bgcontrol=qgraphObject$Arguments$bgcontrol if(is.null(qgraphObject$Arguments$bgres)) bgres=100 else bgres=qgraphObject$Arguments$bgres if(is.null(qgraphObject$Arguments[['trans',exact=FALSE]])) transparency <- NULL else transparency <- qgraphObject$Arguments[['trans',exact=FALSE]] if (is.null(transparency)) { if (isTRUE(bg)) transparency <- TRUE else transparency <- FALSE } # Automatic fading? # autoFade <- isTRUE(fade) # if (isTRUE(fade)){ # fade <- NA # } # if (is.logical(fade)){ fade <- ifelse(fade,NA,1) } if (identical(fade,FALSE)){ fade <- 1 } if(is.null(qgraphObject$Arguments[['loop']])) loop=1 else loop=qgraphObject$Arguments[['loop']] if(is.null(qgraphObject$Arguments[['loopRotation']])) { loopRotation <- NA } else { loopRotation=qgraphObject$Arguments[['loopRotation']] } if(is.null(qgraphObject$Arguments[['residuals']])) residuals=FALSE else residuals=qgraphObject$Arguments[['residuals']] if(is.null(qgraphObject$Arguments[['residScale']])) residScale=1 else residScale=qgraphObject$Arguments[['residScale']] if(is.null(qgraphObject$Arguments[['residEdge']])) residEdge=FALSE else residEdge=qgraphObject$Arguments[['residEdge']] if(is.null(qgraphObject$Arguments[['bars']])) bars <- list() else bars <- qgraphObject$Arguments[['bars']] if(is.null(qgraphObject$Arguments[['barSide']])) barSide <- 1 else barSide <- qgraphObject$Arguments[['barSide']] if(is.null(qgraphObject$Arguments[['barLength']])) barLength <- 0.5 else barLength <- qgraphObject$Arguments[['barLength']] if(is.null(qgraphObject$Arguments[['barColor']])) barColor <- 'border' else barColor <- qgraphObject$Arguments[['barColor']] if(is.null(qgraphObject$Arguments[['barsAtSide']])) barsAtSide <- FALSE else barsAtSide <- qgraphObject$Arguments[['barsAtSide']] # Means and 
SDs: if(is.null(qgraphObject$Arguments[['means']])) means <- NA else means <- qgraphObject$Arguments[['means']] if(is.null(qgraphObject$Arguments[['SDs']])) SDs <- NA else SDs <- qgraphObject$Arguments[['SDs']] if(is.null(qgraphObject$Arguments[['meanRange']])) { if (all(is.na(means))) meanRange <- c(NA,NA) else meanRange <- range(means,na.rm=TRUE) }else meanRange <- qgraphObject$Arguments[['meanRange']] if (!is.list(bars)) bars <- as.list(bars) if(is.null(qgraphObject$Arguments[['CircleEdgeEnd']])) CircleEdgeEnd=FALSE else CircleEdgeEnd=qgraphObject$Arguments[['CircleEdgeEnd']] if(is.null(qgraphObject$Arguments[['loopAngle']])) loopangle=pi/2 else loopAngle=qgraphObject$Arguments[['loopAngle']] if(is.null(qgraphObject$Arguments[['legend.cex']])) legend.cex=0.6 else legend.cex=qgraphObject$Arguments[['legend.cex']] if(is.null(qgraphObject$Arguments[['legend.mode']])) { if (!is.null(nodeNames) && !is.null(groups)){ legend.mode <- "style1" # or style2 } else if (!is.null(nodeNames)) legend.mode <- "names" else legend.mode <- "groups" } else legend.mode=qgraphObject$Arguments[['legend.mode']] if(is.null(qgraphObject$Arguments$borders)){ # if (!drawPies){ borders <- TRUE # } } else { # if (drawPies){ # warning("'borders' argument ignored if 'pie' argument is used.") # } else { borders <- qgraphObject$Arguments[['borders']] # } } ### Polygon lookup list: polygonList = list( ellipse = ELLIPSEPOLY, heart = HEARTPOLY, star = STARPOLY, crown = CROWNPOLY ) if(!is.null(qgraphObject$Arguments[['polygonList']])) polygonList <- c( polygonList, qgraphObject$Arguments[['polygonList']]) # Rescale to -1 - 1 and compute radians per point: for (i in seq_along(polygonList)) { polygonList[[i]]$x <- (polygonList[[i]]$x - min(polygonList[[i]]$x)) / (max(polygonList[[i]]$x) - min(polygonList[[i]]$x)) * 2 - 1 polygonList[[i]]$y <- (polygonList[[i]]$y - min(polygonList[[i]]$y)) / (max(polygonList[[i]]$y) - min(polygonList[[i]]$y)) * 2 - 1 } 
if(is.null(qgraphObject$Arguments[['label.scale']])) label.scale=TRUE else label.scale=qgraphObject$Arguments[['label.scale']] if(is.null(qgraphObject$Arguments[['label.cex']])){ if (label.scale){ label.cex <- 1 } else { label.cex <- 1 } } else label.cex <- qgraphObject$Arguments[['label.cex']] if(is.null(qgraphObject$Arguments$label.scale.equal)) label.scale.equal=FALSE else label.scale.equal=qgraphObject$Arguments$label.scale.equal if(is.null(qgraphObject$Arguments$label.fill.horizontal)) label.fill.horizontal<-1 else label.fill.horizontal <- qgraphObject$Arguments$label.fill.horizontal if(is.null(qgraphObject$Arguments$label.fill.vertical)) label.fill.vertical<-1 else label.fill.vertical <- qgraphObject$Arguments$label.fill.vertical if(is.null(qgraphObject$Arguments$node.label.offset)) node.label.offset<-c(0.5, 0.5) else node.label.offset <- qgraphObject$Arguments$node.label.offset if(is.null(qgraphObject$Arguments$node.label.position)) node.label.position<-NULL else node.label.position <- qgraphObject$Arguments$node.label.position if(is.null(qgraphObject$Arguments$scores)) scores=NULL else scores=qgraphObject$Arguments$scores if(is.null(qgraphObject$Arguments$scores.range)) scores.range=NULL else scores.range=qgraphObject$Arguments$scores.range if(is.null(qgraphObject$Arguments$lty)) lty=1 else lty=qgraphObject$Arguments$lty if(is.null(qgraphObject$Arguments$vTrans)) vTrans=255 else vTrans=qgraphObject$Arguments$vTrans # if(is.null(qgraphObject$Arguments[['overlay']])) overlay <- FALSE else overlay <- qgraphObject$Arguments[['overlay']] # if(is.null(qgraphObject$Arguments[['overlaySize']])) overlaySize <- 0.5 else overlaySize <- qgraphObject$Arguments[['overlaySize']] if(is.null(qgraphObject$Arguments[['GLratio']])) GLratio <- 2.5 else GLratio <- qgraphObject$Arguments[['GLratio']] if(is.null(qgraphObject$Arguments$layoutScale)) layoutScale <- 1 else layoutScale <- qgraphObject$Arguments$layoutScale if(is.null(qgraphObject$Arguments[['layoutOffset']])) 
layoutOffset <- 0 else layoutOffset <- qgraphObject$Arguments[['layoutOffset']] # Aspect ratio: if(is.null(qgraphObject$Arguments[['aspect']])) aspect=FALSE else aspect=qgraphObject$Arguments[['aspect']] # qgraphObject$Arguments for directed graphs: if(is.null(qgraphObject$Arguments[['curvePivot']])) curvePivot <- FALSE else curvePivot <- qgraphObject$Arguments[['curvePivot']] if (isTRUE(curvePivot)) curvePivot <- 0.1 if(is.null(qgraphObject$Arguments[['curveShape']])) curveShape <- -1 else curveShape <- qgraphObject$Arguments[['curveShape']] if(is.null(qgraphObject$Arguments[['curvePivotShape']])) curvePivotShape <- 0.25 else curvePivotShape <- qgraphObject$Arguments[['curvePivotShape']] if(is.null(qgraphObject$Arguments[['curveScale']])) curveScale <- TRUE else curveScale <- qgraphObject$Arguments[['curveScale']] if(is.null(qgraphObject$Arguments[['curveScaleNodeCorrection']])) curveScaleNodeCorrection <- TRUE else curveScaleNodeCorrection <- qgraphObject$Arguments[['curveScaleNodeCorrection']] if(is.null(qgraphObject$Arguments[['parallelAngle']])) parallelAngle <- NA else parallelAngle <- qgraphObject$Arguments[['parallelAngle']] if(is.null(qgraphObject$Arguments[['parallelAngleDefault']])) parallelAngleDefault <- pi/6 else parallelAngleDefault <- qgraphObject$Arguments[['parallelAngleDefault']] if(is.null(qgraphObject$Arguments[['curveDefault']])) curveDefault <- 1 else curveDefault <- qgraphObject$Arguments[['curveDefault']] if(is.null(qgraphObject$Arguments[['curve']])) { if (any(parallelEdge)) { curve <- ifelse(parallelEdge,0,NA) } else curve <- NA } else { curve <- qgraphObject$Arguments[['curve']] if (length(curve)==1) { curveDefault <- curve curve <- NA } } if(is.null(qgraphObject$Arguments[['curveAll']])) curveAll <- FALSE else curveAll <- qgraphObject$Arguments[['curveAll']] if (curveAll) { curve[is.na(curve)] <- curveDefault } if(is.null(qgraphObject$Arguments$arrows)) arrows=TRUE else arrows=qgraphObject$Arguments$arrows # asize=asize*2.4/height 
if(is.null(qgraphObject$Arguments$open)) open=FALSE else open=qgraphObject$Arguments$open if(is.null(qgraphObject$Arguments$bidirectional)) bidirectional=FALSE else bidirectional=qgraphObject$Arguments$bidirectional # qgraphObject$Arguments for SVG pictures: # if(is.null(qgraphObject$Arguments$tooltips)) tooltips=NULL else tooltips=qgraphObject$Arguments$tooltips # if(is.null(qgraphObject$Arguments$SVGtooltips)) SVGtooltips=NULL else SVGtooltips=qgraphObject$Arguments$SVGtooltips if(is.null(qgraphObject$Arguments$hyperlinks)) hyperlinks=NULL else hyperlinks=qgraphObject$Arguments$hyperlinks # qgraphObject$Arguments for TEX: if(is.null(qgraphObject$Arguments$standAlone)) standAlone=TRUE else standAlone=qgraphObject$Arguments$standAlone ### EASTER EGGS ### if(is.null(qgraphObject$Arguments[['XKCD']])) XKCD <- FALSE else XKCD <- TRUE # # Legend setting 1 # if (is.null(legend)) # { # if (is.null(groups)) legend=FALSE else legend=TRUE # } #if ((legend & filetype!='pdf' & filetype!='eps') | filetype=="svg") if ((legend&is.null(scores))|(identical(filetype,"svg"))) { width=width*(1+(1/GLratio)) } # if (!DoNotPlot) # { # # # Start output: # if (filetype=='default') if (is.null(dev.list()[dev.cur()])) dev.new(rescale="fixed",width=width,height=height) # if (filetype=='R') dev.new(rescale="fixed",width=width,height=height) # if (filetype=='X11' | filetype=='x11') x11(width=width,height=height) # if (filetype=='eps') postscript(paste(filename,".eps",sep=""),height=height,width=width, horizontal=FALSE) # if (filetype=='pdf') pdf(paste(filename,".pdf",sep=""),height=height,width=width) # if (filetype=='tiff') tiff(paste(filename,".tiff",sep=""),units='in',res=res,height=height,width=width) # if (filetype=='png') png(paste(filename,".png",sep=""),units='in',res=res,height=height,width=width) # if (filetype=='jpg' | filetype=='jpeg') jpeg(paste(filename,".jpg",sep=""),units='in',res=res,height=height,width=width) # if (filetype=="svg") # { # if (R.Version()$arch=="x64") 
stop("RSVGTipsDevice is not available for 64bit versions of R.") # require("RSVGTipsDevice") # devSVGTips(paste(filename,".svg",sep=""),width=width,height=height,title=filename) # } # if (filetype=="tex") # { # # # Special thanks to Charlie Sharpsteen for supplying these tikz codes on stackoverflow.com !!! # # # # if (!suppressPackageStartupMessages(require(tikzDevice,quietly=TRUE))) stop("tikzDevice must be installed to use filetype='tex'") # # opt= c( # # getOption('tikzLatexPackages'), # # "\\def\\tooltiptarget{\\phantom{\\rule{1mm}{1mm}}}", # # "\\newbox\\tempboxa\\setbox\\tempboxa=\\hbox{}\\immediate\\pdfxform\\tempboxa \\edef\\emptyicon{\\the\\pdflastxform}", # # "\\newcommand\\tooltip[1]{\\pdfstartlink user{/Subtype /Text/Contents (#1)/AP <>}\\tooltiptarget\\pdfendlink}" # # ) # # # # place_PDF_tooltip <- function(x, y, text) # # { # # # # # Calculate coordinates # # tikzX <- round(grconvertX(x, to = "device"), 2) # # tikzY <- round(grconvertY(y, to = "device"), 2) # # # Insert node # # tikzAnnotate(paste( # # "\\node at (", tikzX, ",", tikzY, ") ", # # "{\\tooltip{", text, "}};", # # sep = '' # # )) # # invisible() # # } # # # # print("NOTE: Using 'tex' as filetype will take longer to run than other filetypes") # # # # tikzDevice:::tikz(paste(filename,".tex",sep=""), standAlone = standAlone, width=width, height=height, packages=opt) # # stop("Tikz device no longer supported due to removal from CRAN. 
Please see www.sachaepskamp.com/qgraph for a fix") # } # } #if (!filetype%in%c('pdf','png','jpg','jpeg','svg','R','eps','tiff')) warning(paste("File type",filetype,"is not supported")) # Specify background: if (is.null(background) && !DoNotPlot){ background <- par("bg") if (background == "transparent") background <- "white" } else { background <- "white" } if (isColor(bg)) background <- bg # Remove alpha: background <- col2rgb(background, alpha = TRUE) background <- rgb(background[1],background[2],background[3],background[4],maxColorValue=255) if (is.null(subplotbg)) subplotbg <- background if (isTRUE(edge.label.bg)) edge.label.bg <- background if(is.null(qgraphObject$Arguments[['label.color']])) { # if(is.null(qgraphObject$Arguments$lcolor)) lcolor <- ifelse(mean(col2rgb(background)/255) > 0.5,"black","white") else lcolor <- qgraphObject$Arguments$lcolor if(is.null(qgraphObject$Arguments$lcolor)) lcolor <- NA else lcolor <- qgraphObject$Arguments$lcolor } else lcolor <- qgraphObject$Arguments[['label.color']] # Legend setting 2 if (legend & !is.null(scores)) { layout(t(1:2),widths=c(GLratio,1)) } # Weighted settings: if (is.null(weighted)) { if (edgelist) { if (ncol(input)==2) weighted=FALSE else weighted=TRUE } if (!edgelist) { if (all(unique(c(input)) %in% c(0,1)) & !grepl("sig",mode)) weighted <- FALSE else weighted <- TRUE } } if (!weighted) cut=0 # par settings: #parOrig <- par(no.readonly=TRUE) if (!DoNotPlot) { par(pty=pty) } if (!edgelist) { if (!is.logical(directed)) if (is.null(directed)) { if (!isSymmetric(unname(input))) directed=TRUE else directed=FALSE } } # Set default edge width: if(is.null(qgraphObject$Arguments[["esize"]])) { if (weighted) { # esize <- max((-1/72)*(nNodes)+5.35,2) esize <- 15*exp(-nNodes/90)+1 } else { esize <- 2 } if (any(directed)) esize <- max(esize/2,1) } else esize <- qgraphObject$Arguments$esize # asize default: if(is.null(qgraphObject$Arguments[["asize"]])) { # asize <- max((-1/10)*(nNodes)+4,1) # asize <- 
ifelse(nNodes>10,2,3) asize <- 2*exp(-nNodes/20)+2 } else asize <- qgraphObject$Arguments[["asize"]] if(!is.null(qgraphObject$Arguments[["edge.width"]])) { esize <- esize * qgraphObject$Arguments[["edge.width"]] asize <- asize * sqrt(qgraphObject$Arguments[["edge.width"]]) } ## arrowAngle default: if(is.null(qgraphObject$Arguments[["arrowAngle"]])) { if (weighted) arrowAngle <- pi/6 else arrowAngle <- pi/8 } else { arrowAngle <- qgraphObject$Arguments[["arrowAngle"]] } ########### GRAPHICAL MODEL SELECTION ####### if (graph == "cor") { if(!all(eigen(input)$values > 0)) { warning("Correlation/covariance matrix is not positive definite. Finding nearest positive definite matrix") input <- as.matrix(Matrix::nearPD(input, keepDiag = TRUE, ensureSymmetry = TRUE)$mat) } } # Partial graph: if (graph != "default") { if (edgelist) stop("Graph requires correlation or covariance matrix") # Check for symmetric matrix: if (!isSymmetric(input)) { stop("Input matrix is not symmetric, thus can not be a correlation or covariance matrix.") } # Check for positive definiteness (glasso does its own check): if (graph != "glasso") { if(!all(eigen(input)$values > 0)) { warning("Correlation/covariance matrix is not positive definite. 
Finding nearest positive definite matrix") input <- as.matrix(Matrix::nearPD(input, keepDiag = TRUE, ensureSymmetry = TRUE)$mat) } } # Association graph: if (graph == "cor") { if (!all(diag(input) == 1)){ input <- cov2cor(input) } } # Concentration graph: if (graph=="pcor") { coln <- colnames(input) rown <- rownames(input) input <- cor2pcor(input) rownames(input) <- rown colnames(input) <- coln } # # FDR: # if (tolower(graph)=="fdr.cor") # { # if (!all(diag(input) == 1)){ # input <- cov2cor(input) # } # input <- FDRnetwork(input, FDRcutoff) # } # # if (tolower(graph)=="fdr.pcor") # { # input <- cor2pcor(input) # input <- FDRnetwork(input, FDRcutoff) # } # # if (tolower(graph) == "fdr") # { # input <- cor2pcor(input) # testResult <- GeneNet::ggm.test.edges(input, fdr = TRUE, plot = FALSE) # net <- GeneNet::extract.network(testResult) # input <- matrix(0, nrow(input), ncol(input)) # for (i in seq_len(nrow(net))) # { # input[net$node1[i],net$node2[i]] <- input[net$node2[i],net$node1[i]] <- net$pcor[i] # } # } # Glasso graph: if (graph == "glasso") { if (edgelist) stop("Concentration graph requires correlation matrix") if (is.null(sampleSize)) stop("'sampleSize' argument is needed for glasso estimation") input <- EBICglasso(input, sampleSize, gamma = tuning, refit=refit, lambda.min.ratio = lambda.min.ratio, threshold = isTRUE(threshold)) } if (graph == "ggmModSelect") { if (edgelist) stop("Concentration graph requires correlation matrix") if (is.null(sampleSize)) stop("'sampleSize' argument is needed for ggmModSelect estimation") input <- ggmModSelect(input, sampleSize, gamma = tuning, lambda.min.ratio = lambda.min.ratio)$graph } diag(input) <- 1 input <- as.matrix(forceSymmetric(input)) } ## Thresholding #### # If threshold is TRUE and graph = "glasso" or "ggmModSelect", set to FALSE: if (isTRUE(threshold) && (graph == "glasso" || graph == "ggmModSelect")){ threshold <- qgraphObject$Arguments[['threshold']] <- 0 } if (is.character(threshold)) { if (graph == "default") 
{ if (verbose) message("'threshold' is assigned a string but 'graph' is not assigned. Detecting if input could be a correlation matrix.") # Detect if graph could be correlations or covariance matrix: # Check if input was a matrix: if (!is.matrix(input) | edgelist) stop(paste0("'",threshold,"' threshold requires a (partial) correlation/covariance matrix as input")) # Check if input is square matrix: if (!isSymmetric(input)) stop(paste0("'",threshold,"' threshold requires a (partial) correlation/covariance matrix as input: input was not a square matrix.")) # Check if input is positive semi definite: if (any(eigen(input)$values < 0)) stop(paste0("'",threshold,"' threshold requires a (partial) correlation/covariance matrix as input: input was not a positive semi-definite matrix")) # If these checks are passed assume matrix is correlation or covariance: } else { if (!graph %in% c("cor","pcor")) { stop("Thresholding by significance level only supported for graph = 'cor' or graph = 'pcor'") } } # Stop for incorrect threshold: if (!threshold %in% c('sig','holm', 'hochberg', 'hommel', 'bonferroni', 'BH', 'BY', 'fdr', 'none', 'locfdr')) { stop("'threshold' argument must be number or 'sig','holm', 'hochberg', 'hommel', 'bonferroni', 'BH', 'BY', 'fdr', 'none' or 'locfdr'") } # Significance: if (threshold != "locfdr") { if (grepl("sig",threshold,ignore.case=TRUE)) { threshold <- "none" } if (is.null(sampleSize)) { stop("'sampleSize' argument is needed for all thresholding with significance except 'locfdr'") } nadj <- sampleSize if (graph == "pcor") { nadj <- nadj - (nNodes - 2) } # Fix for col/row names bugs: if (is.null(colnames(input))){ colnames(input) <- paste0("V",seq_len(ncol(input))) } if (is.null(rownames(input))){ rownames(input) <- paste0("V",seq_len(ncol(input))) } # Compute p-values: if (all(diag(input)==1)) { pvals <- psych::corr.p(input,n = nadj, adjust = threshold, alpha = max(alpha))$p } else { pvals <- psych::corr.p(cov2cor(input), n = nadj, adjust = threshold, 
alpha = max(alpha))$p } # Symmetrize: pvals[lower.tri(pvals)] <- t(pvals)[lower.tri(pvals)] # Remove insignificant edges: input <- input * (pvals < max(alpha)) } else { input <- FDRnetwork(input, FDRcutoff) } threshold <- 0 } #######################3 ## diag default: if(is.null(qgraphObject$Arguments[['diag']])) { if (edgelist) diag <- FALSE else diag <- length(unique(diag(input))) > 1 } else { diag <- qgraphObject$Arguments$diag } # Diag: diagCols=FALSE diagWeights=0 if (is.character(diag)) { if (diag=="col" & !edgelist) { diagWeights=diag(input) diagCols=TRUE diag=FALSE } } if (is.numeric(diag)) { if (length(diag)==1) diag=rep(diag,nNodes) if (length(diag)!=nNodes) stop("Numerical assignment of the 'diag' argument must be if length equal to the number of nodes") diagWeights=diag diagCols=TRUE diag=FALSE } if (is.logical(diag)) if (!diag & !edgelist) diag(input)=0 # CREATE EDGELIST: E <- list() # Remove nonfinite weights: if (any(!is.finite(input))) { input[!is.finite(input)] <- 0 warning("Non-finite weights are omitted") } if (edgelist) { E$from=input[,1] E$to=input[,2] if (ncol(input)>2) E$weight=input[,3] else E$weight=rep(1,length(E$from)) if (length(directed)==1) directed=rep(directed,length(E$from)) if (graph %in% c("sig","significance")) { if (sigSign) { E$weight <- sign0(E$weight) * fdrtool(E$weight,"correlation",plot=FALSE, color.figure=FALSE, verbose=FALSE)$pval } else E$weight <- fdrtool(E$weight,"correlation",plot=FALSE, color.figure=FALSE, verbose=FALSE)$pval } if (bonf) { if (mode=="sig") { E$weight <- E$weight * length(E$weight) E$weight[E$weight > 1] <- 1 E$weight[E$weight < -1] <- -1 } # else warning("Bonferonni correction is only applied if mode='sig'") } if (mode=="sig" & any(E$weight < -1 | E$weight > 1)) { warning("Weights under -1 set to -1 and weights over 1 set to 1") E$weight[E$weight< -1] <- -1 E$weight[E$weight>1] <- 1 } if (mode=="sig") { Pvals <- E$weight E$weight <- sign0(E$weight) * sigScale(abs(E$weight)) } if (OmitInsig) { # if 
(!require("fdrtool")) stop("`fdrtool' package not found, is it installed?") if (mode != "sig") Pvals <- fdrtool(E$weight,"correlation",plot=FALSE, color.figure=FALSE, verbose=FALSE)$pval E$weight[abs(Pvals) > alpha[length(alpha)]] <- 0 } } else { if (is.matrix(directed)) { incl <- directed|upper.tri(input,diag=TRUE) } else { if (length(directed)>1) { stop("'directed' must be TRUE or FALSE or a matrix containing TRUE or FALSE for each element of the input matrix") } else { if (directed) { incl <- matrix(TRUE,nNodes,nNodes) } else { if (isSymmetric(unname(input))) { incl <- upper.tri(input,diag=TRUE) } else { incl <- matrix(TRUE,nNodes,nNodes) } } directed <- matrix(directed,nNodes,nNodes) } } directed <- directed[incl] E$from=numeric(0) E$to=numeric(0) E$weight=numeric(0) E$from=rep(1:nrow(input),times=nrow(input)) E$to=rep(1:nrow(input),each=nrow(input)) E$weight=c(input) E$from <- E$from[c(incl)] E$to <- E$to[c(incl)] E$weight <- E$weight[c(incl)] if (graph %in% c("sig","significance")) { if (sigSign) { E$weight <- sign0(E$weight) * fdrtool(E$weight,"correlation",plot=FALSE, color.figure=FALSE, verbose=FALSE)$pval } else E$weight <- fdrtool(E$weight,"correlation",plot=FALSE, color.figure=FALSE, verbose=FALSE)$pval } if (bonf) { if (mode=="sig") { E$weight <- E$weight * length(E$weight) E$weight[E$weight > 1] <- 1 E$weight[E$weight < -1] <- -1 } # else warning("Bonferonni correction is only applied if mode='sig'") } if (mode=="sig" & any(E$weight < -1 | E$weight > 1)) { warning("Weights under -1 inputusted to -1 and weights over 1 input adjusted to 1") E$weight[E$weight < -1] <- -1 E$weight[E$weight > 1] <- 1 } if (mode=="sig") { Pvals <- E$weight E$weight <- sign0(E$weight) * sigScale(abs(E$weight)) } if (OmitInsig) { # if (!require("fdrtool")) stop("`fdrtool' package not found, is it installed?") if (mode != "sig") Pvals <- fdrtool(E$weight,"correlation",plot=FALSE, color.figure=FALSE, verbose=FALSE)$pval E$weight[abs(Pvals) > alpha[length(alpha)]] <- 0 } if 
(is.list(knots)) { knotList <- knots knots <- matrix(0,nNodes,nNodes) for (k in seq_along(knotList)) { knots[knotList[[k]]] <- k } # If undirected, symmetrize: if (all(incl[upper.tri(incl,diag=TRUE)]) & !any(incl[lower.tri(incl)])) { knots <- pmax(knots,t(knots)) } } if (is.matrix(knots)) { knots <- knots[c(incl)] # knots <- knots[E$weight!=0] } if (is.matrix(curve)) { curve <- curve[c(incl)] # curve <- curve[E$weight!=0] } if (is.matrix(parallelEdge)) { parallelEdge <- parallelEdge[c(incl)] # parallelEdge <- parallelEdge[E$weight!=0] } if (is.matrix(parallelAngle)) { parallelAngle <- parallelAngle[c(incl)] # parallelAngle <- parallelAngle[E$weight!=0] } if (is.matrix(bidirectional)) { bidirectional <- bidirectional[c(incl)] # bidirectional <- bidirectional[E$weight!=0] } if (is.matrix(residEdge)) { residEdge <- residEdge[c(incl)] # residEdge <- residEdge[E$weight!=0] } if (is.matrix(CircleEdgeEnd)) { CircleEdgeEnd <- CircleEdgeEnd[c(incl)] # CircleEdgeEnd <- CircleEdgeEnd[E$weight!=0] } if (is.matrix(edge.labels)) { edge.labels <- edge.labels[c(incl)] # edge.labels <- edge.labels[E$weight!=0] } if (is.matrix(edge.color)) { edge.color <- edge.color[c(incl)] # edge.color <- edge.color[E$weight!=0] } if (is.matrix(edge.label.bg)) { edge.label.bg <- edge.label.bg[c(incl)] # edge.label.bg <- edge.label.bg[E$weight!=0] } if (is.matrix(edge.label.margin)) { edge.label.margin <- edge.label.margin[c(incl)] # edge.label.bg <- edge.label.bg[E$weight!=0] } if (is.matrix(edge.label.font)) { edge.label.font <- edge.label.font[c(incl)] # edge.label.font <- edge.label.font[E$weight!=0] } if (is.matrix(fade)) { fade <- fade[c(incl)] # edge.color <- edge.color[E$weight!=0] } if (!is.null(ELcolor)) { if (is.matrix(ELcolor)) { ELcolor <- ELcolor[c(incl)] # ELcolor <- ELcolor[E$weight!=0] } } # if (!is.null(edge.color)) if (length(edge.color) == length(E$weight)) edge.color <- edge.color[E$weight!=0] if (is.matrix(lty)) { lty <- lty[c(incl)] # lty <- lty[E$weight!=0] } if 
(!is.null(edgeConnectPoints)) { if (is.array(edgeConnectPoints) && isTRUE(dim(edgeConnectPoints)[3]==2)) { edgeConnectPoints <- matrix(edgeConnectPoints[c(incl,incl)],,2) # edgeConnectPoints <- edgeConnectPoints[E$weight!=0,,drop=FALSE] } } if (is.matrix(edge.label.position)) { edge.label.position <- edge.label.position[c(incl)] # edge.label.position <- edge.label.position[E$weight!=0] } } keep <- abs(E$weight)>threshold ###### if (length(loopRotation)==1) loopRotation <- rep(loopRotation,nNodes) if (length(directed)==1) { directed <- rep(directed,length(E$from)) } directed <- directed[keep] if (!is.null(edge.color) && length(edge.color) != sum(keep)) { edge.color <- rep(edge.color,length=length(E$from)) if (length(edge.color) != length(keep)) stop("'edge.color' is wrong length") edge.color <- edge.color[keep] } if (!is.logical(edge.labels)) { if (length(edge.labels) == 1) edge.labels <- rep(edge.labels,length(E$from)) if (length(edge.labels) != length(keep) & length(edge.labels) != sum(keep)) stop("'edge.label.bg' is wrong length") if (length(edge.labels)==length(keep)) edge.labels <- edge.labels[keep] # edge.labels <- rep(edge.labels,length=length(E$from)) } # if (is.logical(edge.label.bg)) # { # edge.label.bg <- "white" # } if (length(edge.label.bg) == 1) edge.label.bg <- rep(edge.label.bg,length(E$from)) if (length(edge.label.bg) != length(keep) & length(edge.label.bg) != sum(keep)) stop("'edge.label.bg' is wrong length") if (length(edge.label.bg)==length(keep)) edge.label.bg <- edge.label.bg[keep] # } if (length(edge.label.margin) == 1) edge.label.margin <- rep(edge.label.margin,length(E$from)) if (length(edge.label.margin) != length(keep) & length(edge.label.margin) != sum(keep)) stop("'edge.label.margin' is wrong length") if (length(edge.label.margin)==length(keep)) edge.label.margin <- edge.label.margin[keep] if (length(edge.label.font) == 1) edge.label.font <- rep(edge.label.font,length(E$from)) if (length(edge.label.font) != length(keep) & 
length(edge.label.font) != sum(keep)) stop("'edge.label.font' is wrong length") if (length(edge.label.font)==length(keep)) edge.label.font <- edge.label.font[keep] if (length(lty) == 1) lty <- rep(lty,length(E$from)) if (length(lty) != length(keep) & length(lty) != sum(keep)) stop("'lty' is wrong length") if (length(lty)==length(keep)) lty <- lty[keep] if (length(fade) == 1) fade <- rep(fade,length(E$from)) if (length(fade) != length(keep) & length(fade) != sum(keep)) stop("'fade' is wrong length") if (length(fade)==length(keep)) fade <- fade[keep] if (!is.null(edgeConnectPoints)) { if (length(edgeConnectPoints) == 1) edgeConnectPoints <- matrix(rep(edgeConnectPoints,2*length(E$from)),,2) if (nrow(edgeConnectPoints) != length(keep) & nrow(edgeConnectPoints) != sum(keep)) stop("Number of rows in 'edgeConnectPoints' do not match number of edges") if (nrow(edgeConnectPoints)==length(keep)) edgeConnectPoints <- edgeConnectPoints[keep,,drop=FALSE] } if (length(edge.label.position) == 1) edge.label.position <- rep(edge.label.position,length(E$from)) if (length(edge.label.position) != length(keep) & length(edge.label.position) != sum(keep)) stop("'edge.label.position' is wrong length") if (length(edge.label.position)==length(keep)) edge.label.position <- edge.label.position[keep] if (!is.null(ELcolor)) { ELcolor <- rep(ELcolor,length = length(E$from)) ELcolor <- ELcolor[keep] } if (is.list(knots)) { knotList <- knots knots <- rep(0,length(E$from)) for (k in seq_along(knotList)) { knots[knotList[[k]]] <- k } } if (length(knots)==length(keep)) knots <- knots[keep] if (length(bidirectional)==1) { bidirectional <- rep(bidirectional,length(E$from)) } if (length(bidirectional)==length(keep)) bidirectional <- bidirectional[keep] if (length(residEdge)==1) { residEdge <- rep(residEdge,length(E$from)) } if (length(residEdge)==length(keep)) residEdge <- residEdge[keep] if (length(CircleEdgeEnd)==1) { CircleEdgeEnd <- rep(CircleEdgeEnd,length(E$from)) } if 
(length(CircleEdgeEnd)==length(keep)) CircleEdgeEnd <- CircleEdgeEnd[keep] if (!is.logical(edge.labels)) { if (length(edge.labels)==length(keep)) { edge.labels <- edge.labels[keep] } } if (length(curve)==1) { curve <- rep(curve,length(E$from)) } if (length(curve)==length(keep)) curve <- curve[keep] if (length(parallelEdge)==1) { parallelEdge <- rep(parallelEdge,length(E$from)) } if (length(parallelEdge)==length(keep)) parallelEdge <- parallelEdge[keep] if (length(parallelAngle)==1) { parallelAngle <- rep(parallelAngle,length(E$from)) } if (length(parallelAngle)==length(keep)) parallelAngle <- parallelAngle[keep] E$from=E$from[keep] E$to=E$to[keep] if (mode=="sig") Pvals <- Pvals[keep] E$weight=E$weight[keep] ## Define cut: if (defineCut) { if (length(E$weight) > 3*nNodes) { # cut <- median(sort(E$weight,decreasing=TRUE)[seq_len(nNodes)]) cut <- max(sort(abs(E$weight),decreasing=TRUE)[2*nNodes], quantile(abs(E$weight),0.75)) } else if (length(E$weight) > 1) cut <- quantile(abs(E$weight),0.75) else cut <- 0 # cut <- quantile(abs(E$weight), cutQuantile) } if (length(E$from) > 0) { maximum=max(abs(c(maximum,max(abs(E$weight)),cut,abs(diagWeights)))) } else maximum = 1 if (cut==0) { avgW=(abs(E$weight)-minimum)/(maximum-minimum) } else if (maximum>cut) avgW=(abs(E$weight)-cut)/(maximum-cut) else avgW=rep(0,length(E$from)) avgW[avgW<0]=0 edgesort=sort(abs(E$weight),index.return=TRUE)$ix edge.width=rep(1,length(E$weight)) # lty and curve settings: if (length(lty)==1) lty=rep(lty,length(E$from)) if (length(edge.label.position)==1) edge.label.position=rep(edge.label.position,length(E$from)) # Make bidirectional vector: if (length(bidirectional)==1) bidirectional=rep(bidirectional,length(E$from)) if (length(bidirectional)!=length(E$from)) stop("Bidirectional vector must be of length 1 or equal to the number of edges") srt <- cbind(pmin(E$from,E$to), pmax(E$from,E$to) , knots, abs(E$weight) > minimum) if (!curveAll | any(parallelEdge)) { dub <- 
duplicated(srt)|duplicated(srt,fromLast=TRUE) if (!curveAll) { if (length(curve)==1) curve <- rep(curve,length(E$from)) curve <- ifelse(is.na(curve),ifelse(knots==0&dub&!bidirectional&is.na(curve),ifelse(E$from==srt[,1],1,-1) * ave(1:nrow(srt),srt[,1],srt[,2],bidirectional,FUN=function(x)seq(curveDefault,-curveDefault,length=length(x))),0),curve) } if (any(parallelEdge)) { # Set parallelAngle value: parallelAngle <- ifelse(is.na(parallelAngle),ifelse(knots==0&dub&!bidirectional&is.na(parallelAngle),ifelse(E$from==srt[,1],1,-1) * ave(1:nrow(srt),srt[,1],srt[,2],bidirectional,FUN=function(x)seq(parallelAngleDefault,-parallelAngleDefault,length=length(x))),0),parallelAngle) } rm(dub) } parallelAngle[is.na(parallelAngle)] <- 0 # Layout settings: if (nNodes == 1 & isTRUE(rescale)) { layout <- matrix(0,1,2) } else { if (is.null(layout)) layout="default" if (!is.matrix(layout)) { # If function, assume igraph function (todo: check this) if (is.function(layout)) { Graph <- graph.edgelist(as.matrix(cbind(E$from,E$to)), any(directed)) E(Graph)$weight <- E$weight # set roots: if (deparse(match.call()[['layout']]) == "layout.reingold.tilford" && is.null(layout.par[['root']])) { sp <- shortest.paths(Graph, mode = "out") diag(sp) <- Inf # Find root nodes: roots <- which(colSums(sp==Inf) == nrow(sp)) # Find roots with longest outgoing paths: maxs <- sapply(roots,function(x)max(sp[x,sp[x,]!=Inf])) layout.par[['root']] <- roots[maxs==max(maxs)] } layout <- do.call(layout,c(list(graph = Graph),layout.par)) } else { if (length(layout) > 1) stop("Incorrect specification of layout.") if (layout=="default" & (any(directed) | !weighted)) layout="spring" if (layout=="default" | layout=="circular" | layout=="circle" | layout=="groups") { if (is.null(groups) | layout == "circle") { layout=matrix(0,nrow=nNodes,ncol=2) tl=nNodes+1 layout[,1]=sin(seq(0,2*pi, length=tl))[-tl] layout[,2]=cos(seq(0,2*pi, length=tl))[-tl] } else { if (is.null(rotation)) rotation=rep(0,length=length(groups)) 
l1=matrix(0,nrow=length(groups),ncol=2) tl=nrow(l1)+1 l1[,1]=sin(seq(0,2*pi, length=tl))[-tl] l1[,2]=cos(seq(0,2*pi, length=tl))[-tl] l1=l1*length(groups)*layout.control layout=matrix(0,nrow=nNodes,ncol=2) for (i in 1:length(groups)) { tl=length(groups[[i]])+1 layout[groups[[i]],1]=repulsion*sin(seq(rotation[i],rotation[i]+2*pi, length=tl))[-tl]+l1[i,1] layout[groups[[i]],2]=repulsion*cos(seq(rotation[i],rotation[i]+2*pi, length=tl))[-tl]+l1[i,2] } } } else if (layout=="spring") { if (length(E$weight) > 0) { if (mode != "sig") { layout=qgraph.layout.fruchtermanreingold(cbind(E$from,E$to),abs(E$weight/max(abs(E$weight)))^2,nNodes,rotation=rotation,layout.control=layout.control, niter=layout.par$niter,max.delta=layout.par$max.delta,area=layout.par$area,cool.exp=layout.par$cool.exp,repulse.rad=layout.par$repulse.rad,init=layout.par$init, constraints=layout.par$constraints) } else { layout=qgraph.layout.fruchtermanreingold(cbind(E$from,E$to),abs(E$weight),nNodes,rotation=rotation,layout.control=layout.control, niter=layout.par$niter,max.delta=layout.par$max.delta,area=layout.par$area,cool.exp=layout.par$cool.exp,repulse.rad=layout.par$repulse.rad,init=layout.par$init, constraints=layout.par$constraints) } } else { if (mode != "sig") { layout=qgraph.layout.fruchtermanreingold(cbind(E$from,E$to),numeric(0),nNodes,rotation=rotation,layout.control=layout.control, niter=layout.par$niter,max.delta=layout.par$max.delta,area=layout.par$area,cool.exp=layout.par$cool.exp,repulse.rad=layout.par$repulse.rad,init=layout.par$init, constraints=layout.par$constraints) } else { layout=qgraph.layout.fruchtermanreingold(cbind(E$from,E$to),numeric(0),nNodes,rotation=rotation,layout.control=layout.control, niter=layout.par$niter,max.delta=layout.par$max.delta,area=layout.par$area,cool.exp=layout.par$cool.exp,repulse.rad=layout.par$repulse.rad,init=layout.par$init, constraints=layout.par$constraints) } } } } } # Layout matrix: if (is.matrix(layout)) if (ncol(layout)>2) { 
layout[is.na(layout)] <- 0 # If character and labels exist, replace: if (is.character(layout) && is.character(labels)) { layout[] <- match(layout,labels) layout[is.na(layout)] <- 0 mode(layout) <- 'numeric' } # Check: if (!all(seq_len(nNodes) %in% layout)) stop("Grid matrix does not contain a placement for every node.") if (any(sapply(seq_len(nNodes),function(x)sum(layout==x))>1)) stop("Grid matrix contains a double entry.") Lmat=layout LmatX=seq(-1,1,length=ncol(Lmat)) LmatY=seq(1,-1,length=nrow(Lmat)) layout=matrix(0,nrow=nNodes,ncol=2) loc <- t(sapply(1:nNodes,function(x)which(Lmat==x,arr.ind=T))) layout <- cbind(LmatX[loc[,2]],LmatY[loc[,1]]) } } # Rescale layout: l=original.layout=layout if (rescale) { if (aspect) { # center: l[,1] <- l[,1] - mean(l[,1]) l[,2] <- l[,2] - mean(l[,2]) lTemp <- l if (length(unique(lTemp[,1]))>1) { l[,1]=(lTemp[,1]-min(lTemp))/(max(lTemp)-min(lTemp))*2-1 } else l[,1] <- 0 if (length(unique(lTemp[,2]))>1) { l[,2]=(lTemp[,2]-min(lTemp))/(max(lTemp)-min(lTemp))*2-1 } else l[,2] <- 0 rm(lTemp) # # Equalize white space: # if (diff(range(l[,1])) < 2) # { # l[,1] <- diff(range(l[,1]))/2 + l[,1] # } # if (diff(range(l[,2])) < 2) # { # l[,2] <- (2-diff(range(l[,2])))/2 + l[,2] # } layout=l } else { if (length(unique(l[,1]))>1) { l[,1]=(l[,1]-min(l[,1]))/(max(l[,1])-min(l[,1]))*2-1 } else l[,1] <- 0 if (length(unique(l[,2]))>1) { l[,2]=(l[,2]-min(l[,2]))/(max(l[,2])-min(l[,2]))*2-1 } else l[,2] <- 0 layout=l } } ## Offset and scale: if (length(layoutScale) == 1) layoutScale <- rep(layoutScale,2) if (length(layoutOffset) == 1) layoutOffset <- rep(layoutOffset,2) layout[,1] <- layout[,1] * layoutScale[1] + layoutOffset[1] layout[,2] <- layout[,2] * layoutScale[2] + layoutOffset[2] l <- layout # Set Edge widths: if (mode=="direct") { edge.width <- abs(E$weight) } else { if (weighted) { edge.width <- avgW*(esize-1)+1 edge.width[edge.width<1]=1 } else { edge.width <- rep(esize,length(E$weight)) } } # # Set edge colors: # if (is.null(edge.color) 
|| (any(is.na(edge.color)) || fade)) # { # if (!is.null(edge.color)) # { # repECs <- TRUE # ectemp <- edge.color # } else repECs <- FALSE # # col <- rep(1,length(E$from)) # # if (weighted) # { # #Edge color: # edge.color=rep("#00000000",length(E$from)) # # # if (mode=="strength"|mode=="direct") # { # if (cut==0) # { # col=(abs(E$weight)-minimum)/(maximum-minimum) # } else # { # col=(abs(E$weight)-minimum)/(cut-minimum) # } # col[col>1]=1 # col[col<0]=0 # if (!gray) # { # if (transparency) # { # col=col^(2) # neg=col2rgb(rgb(0.75,0,0))/255 # pos=col2rgb(rgb(0,0.6,0))/255 # # # Set colors for edges over cutoff: # edge.color[E$weight< -1* minimum] <- rgb(neg[1],neg[2],neg[3],col[E$weight< -1*minimum]) # edge.color[E$weight> minimum] <- rgb(pos[1],pos[2],pos[3],col[E$weight> minimum]) # } else # { # edge.color[E$weight>minimum]=rgb(1-col[E$weight > minimum],1-(col[E$weight > minimum]*0.25),1-col[E$weight > minimum]) # edge.color[E$weight< -1*minimum]=rgb(1-(col[E$weight < (-1)*minimum]*0.25),1-col[E$weight < (-1)*minimum],1-col[E$weight < (-1)*minimum]) # } # } else # { # if (transparency) # { # col=col^(2) # neg="gray10" # pos="gray10" # # # Set colors for edges over cutoff: # edge.color[E$weight< -1* minimum] <- rgb(neg[1],neg[2],neg[3],col[E$weight< -1*minimum]) # edge.color[E$weight> minimum] <- rgb(pos[1],pos[2],pos[3],col[E$weight> minimum]) # } else # { # edge.color[E$weight>minimum]=rgb(1-col[E$weight > minimum],1-(col[E$weight > minimum]),1-col[E$weight > minimum]) # edge.color[E$weight< -1*minimum]=rgb(1-(col[E$weight < (-1)*minimum]),1-col[E$weight < (-1)*minimum],1-col[E$weight < (-1)*minimum]) # } # } # } # if (mode == "sig") # { # # if (!gray) # { # # # Set colors for edges over sig > 0.01 : # if (length(alpha) > 3) edge.color[Pvals > 0 & Pvals < alpha[4] & E$weight > minimum] <- "cadetblue1" # # Set colors for edges over sig > 0.01 : # if (length(alpha) > 2) edge.color[Pvals > 0 & Pvals < alpha[3] & E$weight > minimum] <- "#6495ED" # # Set colors for 
edges over sig > 0.01 : # if (length(alpha) > 1) edge.color[Pvals > 0 & Pvals < alpha[2] & E$weight > minimum] <- "blue" # # Set colors for edges over sig < 0.01 : # edge.color[Pvals > 0 & Pvals < alpha[1] & E$weight > minimum] <- "darkblue" # # # Set colors for edges over sig > 0.01 : # if (length(alpha) > 3) edge.color[Pvals < 0 & Pvals > (-1 * alpha[4]) & E$weight < -1 * minimum] <- rgb(1,0.8,0.4) # # Set colors for edges over sig > 0.01 : # if (length(alpha) > 2) edge.color[Pvals < 0 & Pvals > (-1 * alpha[3]) & E$weight < -1 * minimum] <- "orange" # # Set colors for edges over sig > 0.01 : # if (length(alpha) > 1) edge.color[Pvals < 0 & Pvals > (-1 * alpha[2]) & E$weight < -1 * minimum] <- "darkorange" # # Set colors for edges over sig < 0.01 : # edge.color[Pvals < 0 & Pvals > (-1 * alpha[1]) & E$weight < -1 * minimum] <- "darkorange2" # # # # # } else # { # Pvals <- abs(Pvals) # # Set colors for edges over sig < 0.01 : # if (length(alpha) > 3) edge.color[Pvals > 0 & Pvals < alpha[4] & E$weight > minimum] <- rgb(0.7,0.7,0.7) # if (length(alpha) > 2) edge.color[Pvals > 0 & Pvals < alpha[3] & E$weight > minimum] <- rgb(0.5,0.5,0.5) # if (length(alpha) > 1) edge.color[Pvals > 0 & Pvals < alpha[2] & E$weight > minimum] <- rgb(0.3,0.3,0.3) # edge.color[Pvals > 0 & Pvals < alpha[1] & E$weight > minimum] <- "black" # # } # } # if (cut!=0) # { # if (!gray & (mode=="strength"|mode=="direct")) # { # # Set colors for edges over cutoff: # edge.color[E$weight<= -1*cut] <- "red" # edge.color[E$weight>= cut] <- "darkgreen" # } else if (gray) # { # # Set colors for edges over cutoff: # edge.color[E$weight<= -1*cut] <- "black" # edge.color[E$weight>= cut] <- "black" # # } # } # # } else # { # if (!is.logical(transparency)) Trans=transparency else Trans=1 # edge.color=rep(rgb(0.5,0.5,0.5,Trans),length(edgesort)) # } # if (repECs) # { # ## Add trans: # if (fade & any(!is.na(ectemp))) # { # if (!is.logical(transparency)) col <- rep(transparency,length(col)) # 
edge.color[!is.na(ectemp)] <- addTrans(ectemp[!is.na(ectemp)],round(255*col[!is.na(ectemp)])) # } else { # edge.color[!is.na(ectemp)] <- ectemp[!is.na(ectemp)] # } # rm(ectemp) # } # } else { # if (length(edge.color) == 1) edge.color <- rep(edge.color,length(E$from)) # if (length(edge.color) != length(E$from)) stop("Number of edge colors not equal to number of edges") # } # Set edge colors: if (is.null(edge.color) || (any(is.na(edge.color)) || any(is.na(fade)) || any(fade != 1))) { if (!is.null(edge.color)) { repECs <- TRUE ectemp <- edge.color } else repECs <- FALSE # col vector will contain relative strength: col <- rep(1,length(E$from)) if (weighted) { # Dummmy vector containing invisible edges: edge.color <- rep("#00000000",length(E$from)) # Normal color scheme (0 is invisible, stronger is more visible) if (mode=="strength"|mode=="direct") { # Set relative strength: if (cut==0) { col <- (abs(E$weight)-minimum)/(maximum-minimum) } else { if (cut > minimum){ col <- (abs(E$weight)-minimum)/(cut-minimum) } else { col <- ifelse(abs(E$weight) > minimum, 1, 0) } } col[col>1] <- 1 col[col<0] <- 0 col <- col^colFactor # Set edges between minimum and cut: # if (autoFade) # { if (isTRUE(transparency)) { edge.color[E$weight > minimum] <- addTrans(posCol[1],round(ifelse(is.na(fade),col,fade)[E$weight > minimum]*255)) edge.color[E$weight < -1*minimum] <- addTrans(negCol[1],round(ifelse(is.na(fade),col,fade)[E$weight < -1*minimum]*255)) } else { edge.color[E$weight > minimum] <- Fade(posCol[1],ifelse(is.na(fade),col,fade)[E$weight > minimum], background) edge.color[E$weight < -1*minimum] <- Fade(negCol[1],ifelse(is.na(fade),col,fade)[E$weight < -1*minimum], background) } # } # else { # if (isTRUE(transparency)) # { # edge.color[E$weight > minimum] <- addTrans(posCol[1],round(fade[E$weight > minimum]*255)) # edge.color[E$weight < -1*minimum] <- addTrans(negCol[1],round(fade[E$weight < -1*minimum]*255)) # } else { # edge.color[E$weight > minimum] <- Fade(posCol[1],fade[E$weight 
> minimum], background) # edge.color[E$weight < -1*minimum] <- Fade(negCol[1],fade[E$weight < -1*minimum], background) # } # edge.color[E$weight > minimum] <- posCol[1] # edge.color[E$weight < -1*minimum] <- negCol[1] # } # Set colors over cutoff if cut != 0: if (cut!=0) { # Old code: # if (posCol[1]!=posCol[2]) edge.color[E$weight >= cut] <- posCol[2] # if (negCol[1]!=negCol[2]) edge.color[E$weight <= -1*cut] <- negCol[2] # New code (1.9.7) edge.color[E$weight >= cut & abs(E$weight) >= minimum] <- posCol[2] edge.color[E$weight <= -1*cut & abs(E$weight) >= minimum] <- negCol[2] } } if (mode == "sig") { if (!gray) { # Set colors for edges over sig > 0.01 : if (length(alpha) > 3) edge.color[Pvals >= 0 & Pvals < alpha[4] & E$weight > minimum] <- "cadetblue1" # Set colors for edges over sig > 0.01 : if (length(alpha) > 2) edge.color[Pvals >= 0 & Pvals < alpha[3] & E$weight > minimum] <- "#6495ED" # Set colors for edges over sig > 0.01 : if (length(alpha) > 1) edge.color[Pvals >= 0 & Pvals < alpha[2] & E$weight > minimum] <- "blue" # Set colors for edges over sig < 0.01 : edge.color[Pvals >= 0 & Pvals < alpha[1] & E$weight > minimum] <- "darkblue" # Set colors for edges over sig > 0.01 : if (length(alpha) > 3) edge.color[Pvals < 0 & Pvals > (-1 * alpha[4]) & E$weight < -1 * minimum] <- rgb(1,0.8,0.4) # Set colors for edges over sig > 0.01 : if (length(alpha) > 2) edge.color[Pvals < 0 & Pvals > (-1 * alpha[3]) & E$weight < -1 * minimum] <- "orange" # Set colors for edges over sig > 0.01 : if (length(alpha) > 1) edge.color[Pvals < 0 & Pvals > (-1 * alpha[2]) & E$weight < -1 * minimum] <- "darkorange" # Set colors for edges over sig < 0.01 : edge.color[Pvals < 0 & Pvals > (-1 * alpha[1]) & E$weight < -1 * minimum] <- "darkorange2" } else { Pvals <- abs(Pvals) # Set colors for edges over sig < 0.01 : if (length(alpha) > 3) edge.color[Pvals > 0 & Pvals < alpha[4] & E$weight > minimum] <- rgb(0.7,0.7,0.7) if (length(alpha) > 2) edge.color[Pvals > 0 & Pvals < alpha[3] & 
E$weight > minimum] <- rgb(0.5,0.5,0.5) if (length(alpha) > 1) edge.color[Pvals > 0 & Pvals < alpha[2] & E$weight > minimum] <- rgb(0.3,0.3,0.3) edge.color[Pvals > 0 & Pvals < alpha[1] & E$weight > minimum] <- "black" } } } else { if (!is.logical(transparency)) Trans <- transparency else Trans <- 1 edge.color <- rep(addTrans(unCol,round(255*Trans)),length(edgesort)) } if (repECs) { # Colors to fade: indx <- !is.na(ectemp) & is.na(fade) ## Add trans: if (any(is.na(fade)) & any(!is.na(ectemp))) { # Replace all edge colors: edge.color[!is.na(ectemp)] <- ectemp[!is.na(ectemp)] if (!is.logical(transparency)) col <- rep(transparency,length(col)) if (isTRUE(transparency)) { edge.color[indx] <- addTrans(ectemp[indx],round(255*col[indx])) } else { edge.color[indx] <- Fade(ectemp[indx],col[indx], background) } } else { edge.color[indx] <- ectemp[indx] } rm(ectemp) rm(indx) } } else { if (length(edge.color) == 1) edge.color <- rep(edge.color,length(E$from)) if (length(edge.color) != length(E$from)) stop("Number of edge colors not equal to number of edges") } # Vertex color: # if (is.null(color) & !is.null(groups)) # { # if (!gray) # { # if (pastel) # { # color <- rainbow_hcl(length(groups), start = rainbowStart * 360, end = (360 * rainbowStart + 360*(length(groups)-1)/length(groups))) # } else { # color <- rainbow(length(groups), start = rainbowStart, end = (rainbowStart + (max(1.1,length(groups)-1))/length(groups)) %% 1) # } # } # if (gray) color <- sapply(seq(0.2,0.8,length=length(groups)),function(x)rgb(x,x,x)) # } if (is.null(color) & !is.null(groups)) { if (is.function(palette)){ color <- palette(length(groups)) } else if (palette == "rainbow"){ color <- rainbow(length(groups), start = rainbowStart, end = (rainbowStart + (max(1.1,length(groups)-1))/length(groups)) %% 1) } else if (palette == "gray" | palette == "grey"){ color <- shadesOfGrey(length(groups)) } else if (palette == "colorblind"){ color <- colorblind(length(groups)) } else if (palette == "R"){ color <- 
seq_len(length(groups)) } else if (palette == "ggplot2"){ color <- ggplot_palette(length(groups)) } else if (palette == "pastel"){ color <- rainbow_hcl(length(groups), start = rainbowStart * 360, end = (360 * rainbowStart + 360*(length(groups)-1)/length(groups))) } else if (palette == "neon"){ color <- neon(length(groups)) } else if (palette == "pride"){ if (length(groups) > 7){ color <- rainbow(length(groups), start = rainbowStart, end = (rainbowStart + (max(1.1,length(groups)-1))/length(groups)) %% 1) } else { pridecols <- c("#E50000","#FF8D00","#FFEE00","#028121","#004CFF","#760088") # Reorder: startcol <- round(1 + rainbowStart * 6) sequence <- startcol:(startcol+length(groups))%%6 sequence[sequence==0] <- 6 color <- pridecols[sequence] } } else stop(paste0("Palette '",palette,"' is not supported.")) } # Default color: if (is.null(color)) color <- "background" vertex.colors <- rep(color, length=nNodes) if (!is.null(groups)) { vertex.colors <- rep("background", length=nNodes) for (i in 1:length(groups)) vertex.colors[groups[[i]]]=color[i] } else vertex.colors <- rep(color, length=nNodes) if (length(color)==nNodes) vertex.colors <- color if (all(col2rgb(background,TRUE) == col2rgb("transparent",TRUE))) { vertex.colors[vertex.colors=="background"] <- "white" } else vertex.colors[vertex.colors=="background"] <- background # Label color: if (length(lcolor) != nNodes){ lcolor <- rep(lcolor,nNodes) } if (any(is.na(lcolor))){ # if (!is.null(theme) && is.character(theme) && theme == "gray"){ # browser() # lcolor[is.na(lcolor)] <- ifelse(vertex.colors == "background", # ifelse(mean(col2rgb(background)/255) > 0.5,"black","white"), # ifelse(colMeans(col2rgb(vertex.colors[is.na(lcolor)])) > 0.5,"black","white") # ) # } else { lcolor[is.na(lcolor)] <- ifelse(vertex.colors == "background", ifelse(mean(col2rgb(background)/255) > 0.5,"black","white"), ifelse(colMeans(col2rgb(vertex.colors[is.na(lcolor)])/255) > label.color.split,"black","white") ) # } } # Dummy groups list: if 
(is.null(groups)) { groups <- list(1:nNodes) } # Scores: if (!is.null(scores)) { if (length(scores)!=nNodes) { warning ("Length of scores is not equal to nuber of items") } else { bcolor <- vertex.colors if (is.null(scores.range)) scores.range=c(min(scores),max(scores)) scores[is.na(scores)]=scores.range[1] rgbmatrix=1-t(col2rgb(vertex.colors)/255) for (i in 1:nNodes) rgbmatrix[i,]=rgbmatrix[i,] * (scores[i]-scores.range[1] ) / (scores.range[2]-scores.range[1] ) vertex.colors=rgb(1-rgbmatrix) } } if (diagCols) { if (diagCols & !is.null(scores)) stop("Multiple modes specified for vertex colors (diag and scores)") if (diagCols & weighted) { if (is.null(bcolor) & !all(vertex.colors=="white")) bcolor=vertex.colors if (cut==0) { colV=(abs(diagWeights)-minimum)/(maximum-minimum) } else { colV=(abs(diagWeights)-minimum)/(cut-minimum) } colV[colV>1]=1 colV[colV<0]=0 if (transparency) { vertex.colors=rep("#00000000",nNodes) colV=colV^(2) neg=col2rgb(rgb(0.75,0,0))/255 pos=col2rgb(rgb(0,0.6,0))/255 # Set colors for edges over cutoff: vertex.colors[diagWeights< -1* minimum] <- rgb(neg[1],neg[2],neg[3],colV[diagWeights< -1*minimum]) vertex.colors[diagWeights> minimum] <- rgb(pos[1],pos[2],pos[3],colV[diagWeights> minimum]) } else { vertex.colors=rep("white",nNodes) vertex.colors[diagWeights>minimum]=rgb(1-colV[diagWeights > minimum],1-(colV[diagWeights> minimum]*0.25),1-colV[diagWeights > minimum]) vertex.colors[diagWeights< -1*minimum]=rgb(1-(colV[diagWeights< (-1)*minimum]*0.25),1-colV[diagWeights < (-1)*minimum],1-colV[diagWeights < (-1)*minimum]) } if (cut!=0) { # Set colors for edges over cutoff: vertex.colors[diagWeights<= -1*cut] <- "red" vertex.colors[diagWeights>= cut] <- "darkgreen" } } } if (is.null(bcolor)) { bcolor <- rep(ifelse(mean(col2rgb(background)/255)>0.5,"black","white"),nNodes) } else { bcolor <- rep(bcolor,length=nNodes) } if (any(vTrans<255) || length(vTrans) > 1) { if ( length(vTrans) > 1 && length(vTrans) != nNodes) {vTrans <- 255} # Transparance in 
vertex colors: num2hex <- function(x) { hex=unlist(strsplit("0123456789ABCDEF",split="")) return(paste(hex[(x-x%%16)/16+1],hex[x%%16+1],sep="")) } colHEX <- rgb(t(col2rgb(vertex.colors)/255)) vertex.colors <- paste(sapply(strsplit(colHEX,split=""),function(x)paste(x[1:7],collapse="")),num2hex(vTrans),sep="") } # Vertex size: if (length(vsize)==1) vsize=rep(vsize,nNodes) if (length(vsize2)==1) vsize2=rep(vsize2,nNodes) if (!edgelist) Vsums=rowSums(abs(input))+colSums(abs(input)) if (edgelist) { Vsums=numeric(0) for (i in 1:nNodes) Vsums[i]=sum(c(input[,1:2])==i) } if (length(vsize)==2 & nNodes>2 & length(unique(Vsums))>1) vsize=vsize[1] + (vsize[2]-vsize[1]) * (Vsums-min(Vsums))/(max(Vsums)-min(Vsums)) if (length(vsize)==2 & nNodes>2 & length(unique(Vsums))==1) vsize=rep(mean(vsize),nNodes) if (length(vsize2)==2 & nNodes>2 & length(unique(Vsums))>1) vsize2=vsize2[1] + (vsize2[2]-vsize2[1]) * (Vsums-min(Vsums))/(max(Vsums)-min(Vsums)) if (length(vsize2)==2 & nNodes>2 & length(unique(Vsums))==1) vsize2=rep(mean(vsize2),nNodes) # Vertex shapes: if (length(shape)==1) shape=rep(shape,nNodes) # means: if (length(means)==1) means <- rep(means,nNodes) if (length(SDs)==1) SDs <- rep(SDs, nNodes) # # pch1=numeric(0) # pch2=numeric(0) # # for (i in 1:length(shape)) # { # if (shape[i]=="circle") # { # pch1[i]=16 # pch2[i]=1 # } # if (shape[i]=="square") # { # pch1[i]=15 # pch2[i]=0 # } # if (shape[i]=="triangle") # { # pch1[i]=17 # pch2[i]=2 # } # if (shape[i]=="diamond") # { # pch1[i]=18 # pch2[i]=5 # } # if (!shape[i]%in%c("circle","square","triangle","diamond","rectangle")) stop(paste("Shape",shape[i],"is not supported")) # } # # Arrow sizes: if (length(asize)==1) asize=rep(asize,length(E$from)) if (length(asize)!=length(E$from)) warning("Length of 'asize' is not equal to the number of edges") # Edge labels: # Make labels: if (!is.logical(edge.labels)) { # edge.labels=as.character(edge.labels) if (length(edge.labels)!=length(E$from)) { warning("Number of edge labels did not 
correspond to number of edges, edge labes have been ommited") edge.labels <- FALSE } if (length(edge.labels) > 0 & is.character(edge.labels)) { edge.labels[edge.labels=="NA"]="" } } else { if (edge.labels) { edge.labels= as.character(round(E$weight,2)) } else edge.labels <- rep('',length(E$from)) } if (is.numeric(edge.labels)) edge.labels <- as.character(edge.labels) # Bars: length(bars) <- nNodes barSide <- rep(barSide,nNodes) barColor <- rep(barColor, nNodes) barLength <- rep(barLength, nNodes) barColor[barColor == 'border'] <- bcolor[barColor == 'border'] # Compute loopRotation: if (DoNotPlot) { loopRotation[is.na(loopRotation)] <- 0 } else { for (i in seq_len(nNodes)) { if (is.na(loopRotation[i])) { centX <- mean(layout[,1]) centY <- mean(layout[,2]) for (g in 1:length(groups)) { if (i%in%groups[[g]] & length(groups[[g]]) > 1) { centX <- mean(layout[groups[[g]],1]) centY <- mean(layout[groups[[g]],2]) } } loopRotation[i] <- atan2usr2in(layout[i,1]-centX,layout[i,2]-centY) if (shape[i]=="square") { loopRotation[i] <- c(0,0.5*pi,pi,1.5*pi)[which.min(abs(c(0,0.5*pi,pi,1.5*pi)-loopRotation[i]%%(2*pi)))] } } } } # Node names: if (is.null(nodeNames)) nodeNames <- labels # Make labels: if (is.logical(labels)) { if (labels) { labels=1:nNodes } else { labels <- rep('',nNodes) } } border.width <- rep(border.width, nNodes) # Node argument setup: borders <- rep(borders,length=nNodes) label.font <- rep(label.font,length=nNodes) # Make negative dashed: if (negDashed){ lty[] <- ifelse(E$weight < 0, 2, 1) } ########### SPLIT HERE ########### ### Fill qgraph object with stuff: ## Edgelist: qgraphObject$Edgelist$from <- E$from qgraphObject$Edgelist$to <- E$to qgraphObject$Edgelist$weight <- E$weight qgraphObject$Edgelist$directed <- directed qgraphObject$Edgelist$bidirectional <- bidirectional # Nodes: qgraphObject$graphAttributes$Nodes$border.color <- bcolor qgraphObject$graphAttributes$Nodes$borders <- borders qgraphObject$graphAttributes$Nodes$border.width <- border.width 
qgraphObject$graphAttributes$Nodes$label.cex <- label.cex qgraphObject$graphAttributes$Nodes$label.font <- label.font qgraphObject$graphAttributes$Nodes$label.color <- lcolor qgraphObject$graphAttributes$Nodes$labels <- labels qgraphObject$graphAttributes$Nodes$names <- nodeNames qgraphObject$graphAttributes$Nodes$loopRotation <- loopRotation qgraphObject$graphAttributes$Nodes$shape <- shape qgraphObject$graphAttributes$Nodes$color <- vertex.colors qgraphObject$graphAttributes$Nodes$width <- vsize qgraphObject$graphAttributes$Nodes$height <- vsize2 qgraphObject$graphAttributes$Nodes$subplots <- subplots qgraphObject$graphAttributes$Nodes$images <- images # qgraphObject$graphAttributes$Nodes$tooltips <- tooltips # qgraphObject$graphAttributes$Nodes$SVGtooltips <- SVGtooltips qgraphObject$graphAttributes$Nodes$bars <- bars qgraphObject$graphAttributes$Nodes$barSide <- barSide qgraphObject$graphAttributes$Nodes$barColor <- barColor qgraphObject$graphAttributes$Nodes$barLength <- barLength qgraphObject$graphAttributes$Nodes$means <- means qgraphObject$graphAttributes$Nodes$SDs <- SDs qgraphObject$graphAttributes$Nodes$node.label.offset <- node.label.offset qgraphObject$graphAttributes$Nodes$node.label.position <- node.label.position # Pies: qgraphObject$graphAttributes$Nodes$pieColor <- pieColor qgraphObject$graphAttributes$Nodes$pieColor2 <- pieColor2 qgraphObject$graphAttributes$Nodes$pieBorder <- pieBorder qgraphObject$graphAttributes$Nodes$pie <- pie qgraphObject$graphAttributes$Nodes$pieStart <- pieStart qgraphObject$graphAttributes$Nodes$pieDarken <- pieDarken # Edges: qgraphObject$graphAttributes$Edges$curve <- curve qgraphObject$graphAttributes$Edges$color <- edge.color qgraphObject$graphAttributes$Edges$labels <- edge.labels qgraphObject$graphAttributes$Edges$label.cex <- edge.label.cex qgraphObject$graphAttributes$Edges$label.bg <- edge.label.bg qgraphObject$graphAttributes$Edges$label.margin <- edge.label.margin qgraphObject$graphAttributes$Edges$label.font 
<- edge.label.font qgraphObject$graphAttributes$Edges$label.color <- ELcolor qgraphObject$graphAttributes$Edges$width <- edge.width qgraphObject$graphAttributes$Edges$lty <- lty qgraphObject$graphAttributes$Edges$fade <- fade qgraphObject$graphAttributes$Edges$edge.label.position <- edge.label.position qgraphObject$graphAttributes$Edges$residEdge <- residEdge qgraphObject$graphAttributes$Edges$CircleEdgeEnd <- CircleEdgeEnd qgraphObject$graphAttributes$Edges$asize <- asize if (mode == "sig") qgraphObject$graphAttributes$Edges$Pvals <- Pvals else Pvals <- NULL qgraphObject$graphAttributes$Edges$parallelEdge <- parallelEdge qgraphObject$graphAttributes$Edges$parallelAngle <- parallelAngle qgraphObject$graphAttributes$Edges$edgeConnectPoints <- edgeConnectPoints # Knots: qgraphObject$graphAttributes$Knots$knots <- knots qgraphObject$graphAttributes$Knots$knot.size <- knot.size qgraphObject$graphAttributes$Knots$knot.color <- knot.color qgraphObject$graphAttributes$Knots$knot.borders <- knot.borders qgraphObject$graphAttributes$Knots$knot.border.color <- knot.border.color qgraphObject$graphAttributes$Knots$knot.border.width <- knot.border.width # Graph: qgraphObject$graphAttributes$Graph$nNodes <- nNodes qgraphObject$graphAttributes$Graph$weighted <- weighted qgraphObject$graphAttributes$Graph$edgesort <- edgesort qgraphObject$graphAttributes$Graph$scores <- scores qgraphObject$graphAttributes$Graph$scores.range <- scores.range qgraphObject$graphAttributes$Graph$groups <- groups qgraphObject$graphAttributes$Graph$minimum <- minimum qgraphObject$graphAttributes$Graph$maximum <- maximum qgraphObject$graphAttributes$Graph$cut <- cut qgraphObject$graphAttributes$Graph$polygonList <- polygonList qgraphObject$graphAttributes$Graph$mode <- mode qgraphObject$graphAttributes$Graph$color <- color # Layout: qgraphObject$layout <- layout qgraphObject$layout.orig <- original.layout # Plot options: qgraphObject$plotOptions$filetype <- filetype qgraphObject$plotOptions$filename <- 
filename qgraphObject$plotOptions$background <- background qgraphObject$plotOptions$bg <- bg qgraphObject$plotOptions$normalize <- normalize qgraphObject$plotOptions$plot <- plot qgraphObject$plotOptions$mar <- mar qgraphObject$plotOptions$GLratio <- GLratio qgraphObject$plotOptions$legend <- legend qgraphObject$plotOptions$legend.cex <- legend.cex qgraphObject$plotOptions$pty <- pty qgraphObject$plotOptions$XKCD <- XKCD qgraphObject$plotOptions$residuals <- residuals qgraphObject$plotOptions$residScale <- residScale qgraphObject$plotOptions$arrows <- arrows qgraphObject$plotOptions$arrowAngle <- arrowAngle qgraphObject$plotOptions$open <- open qgraphObject$plotOptions$curvePivot <- curvePivot qgraphObject$plotOptions$curveShape <- curveShape qgraphObject$plotOptions$curveScale <- curveScale qgraphObject$plotOptions$curveScaleNodeCorrection <- curveScaleNodeCorrection qgraphObject$plotOptions$curvePivotShape <- curvePivotShape qgraphObject$plotOptions$label.scale <- label.scale qgraphObject$plotOptions$label.scale.equal <- label.scale.equal qgraphObject$plotOptions$label.fill.vertical <- label.fill.vertical qgraphObject$plotOptions$label.fill.horizontal <- label.fill.horizontal qgraphObject$plotOptions$label.prop <- label.prop qgraphObject$plotOptions$label.norm <- label.norm # qgraphObject$plotOptions$overlay <- overlay qgraphObject$plotOptions$details <- details qgraphObject$plotOptions$title <- title qgraphObject$plotOptions$title.cex <- title.cex qgraphObject$plotOptions$preExpression <- preExpression qgraphObject$plotOptions$postExpression <- postExpression qgraphObject$plotOptions$legend.mode <- legend.mode qgraphObject$plotOptions$srt <- srt qgraphObject$plotOptions$gray <- gray # qgraphObject$plotOptions$overlaySize <- overlaySize qgraphObject$plotOptions$plotELBG <- plotELBG qgraphObject$plotOptions$alpha <- alpha qgraphObject$plotOptions$width <- width qgraphObject$plotOptions$height <- height qgraphObject$plotOptions$aspect <- aspect 
qgraphObject$plotOptions$rescale <- rescale qgraphObject$plotOptions$barsAtSide <- barsAtSide qgraphObject$plotOptions$bgres <- bgres qgraphObject$plotOptions$bgcontrol <- bgcontrol qgraphObject$plotOptions$resolution <- res qgraphObject$plotOptions$subpars <- subpars qgraphObject$plotOptions$subplotbg <- subplotbg qgraphObject$plotOptions$usePCH <- usePCH qgraphObject$plotOptions$node.resolution <- node.resolution qgraphObject$plotOptions$noPar <- noPar qgraphObject$plotOptions$meanRange <- meanRange qgraphObject$plotOptions$drawPies <- drawPies qgraphObject$plotOptions$pieRadius <- pieRadius qgraphObject$plotOptions$pastel <- pastel qgraphObject$plotOptions$piePastel <- piePastel qgraphObject$plotOptions$rainbowStart <- rainbowStart qgraphObject$plotOptions$pieCIs <- pieCIs if (!DoNotPlot) { plot(qgraphObject) invisible(qgraphObject) } else { return(qgraphObject) } } qgraph/R/cor_auto.R0000644000176200001440000001100214473011655013673 0ustar liggesusers## Automatically computes a correlation matrix: # Wrapper around lavCor which detects ordinal variables! 
cor_auto <- function(
  data,                  # A data frame (or object coercible to one)
  select,                # Optional: columns to select (passed to subset())
  detectOrdinal = TRUE,  # Detect ordinal variables?
  ordinalLevelMax = 7,   # Maximum number of levels for a variable to be classified as ordinal
  npn.SKEPTIC = FALSE,   # If TRUE, compute nonparanormal SKEPTIC on fully continuous data
  forcePD = FALSE,       # Force positive definiteness via Matrix::nearPD?
  missing = "pairwise",  # Missing-data handling passed to lavaan::lavCor ("fiml" is treated specially)
  verbose = TRUE         # Message about removed/reclassified variables?
){
  # Automatically compute a correlation matrix: wrapper around lavaan::lavCor
  # that detects ordinal variables (so poly/tetrachoric correlations are used
  # where appropriate). Returns a plain correlation matrix.
  # NOTE: the parameter named 'missing' does not shadow base::missing() in
  # calls like missing(select), because R looks up a *function* there.
  data <- as.data.frame(data)

  # Optionally select columns:
  if (!missing(select)){
    data <- subset(data, select = select)
  }

  # Remove unordered factors (lavCor cannot handle these).
  # drop = FALSE keeps 'data' a data frame even if only one column remains.
  Factors <- sapply(data, is, "factor") & !sapply(data, is, "ordered")
  if (any(Factors)){
    if (verbose){
      message(paste("Removing factor variables:", paste(names(data)[Factors], collapse = "; ")))
    }
    data <- data[, !Factors, drop = FALSE]
  }

  # Remove columns that are entirely NA:
  data <- data[, sapply(data, function(x) mean(is.na(x))) != 1, drop = FALSE]

  # Detect ordinal variables: integer-valued with at most ordinalLevelMax levels.
  Numerics <- which(sapply(data, is.numeric) | sapply(data, is.integer))
  if (detectOrdinal && length(Numerics) > 0){
    isOrd <- sapply(Numerics, function(i){
      isInt <- is.integer(data[, i]) | all(data[, i] %% 1 == 0, na.rm = TRUE)
      nLevel <- length(unique(data[, i]))
      isInt & nLevel <= ordinalLevelMax
    })
    if (any(isOrd)){
      if (verbose){
        message(paste("Variables detected as ordinal:", paste(names(data)[Numerics][isOrd], collapse = "; ")))
      }
      for (i in Numerics[isOrd]){
        data[, i] <- ordered(data[, i])
      }
    }
  }

  ### START COMPUTING CORRELATIONS ###
  if (all(sapply(data, is.numeric) | sapply(data, is.integer)) && npn.SKEPTIC){
    # All variables continuous: nonparanormal SKEPTIC estimator.
    for (i in seq_len(ncol(data))) data[, i] <- as.numeric(data[, i])
    if (!requireNamespace("huge")) stop("'huge' package needs to be installed.")
    CorMat <- huge::huge.npn(data, "skeptic")
  } else {
    if (missing == "fiml"){
      # FIML needs ML estimation, a mean structure, and a fitted lavaan object:
      lavobject <- suppressWarnings(lavaan::lavCor(data, missing = missing, se = "none",
                                                   meanstructure = TRUE, estimator = "ML",
                                                   output = "fit"))
      # Correlation matrix from the model-implied covariance matrix:
      CorMat <- cov2cor(lavaan::inspect(lavobject, "cov.ov"))
    } else {
      # Defaults for other missing-data options:
      CorMat <- suppressWarnings(lavaan::lavCor(data, missing = missing,
                                                meanstructure = FALSE,
                                                estimator = "two.step"))
    }
    class(CorMat) <- "matrix"
  }

  # Optionally force the result to be positive definite.
  # && short-circuits so the eigen decomposition is skipped when forcePD is FALSE.
  if (forcePD && !all(eigen(CorMat)$values > 0)){
    warning("Correlation matrix is not positive definite. Finding nearest positive definite matrix")
    CorMat <- as.matrix(Matrix::nearPD(CorMat, corr = TRUE, ensureSymmetry = TRUE, keepDiag = TRUE)$mat)
  }

  return(CorMat)
}
qgraph/R/mutualInformation.R0000644000176200001440000000317314430573263015610 0ustar liggesusers
# entropy <- function(covMat, subset){
#   covMat <- covMat[subset,subset,drop=FALSE]
#   1/2 * log((2*pi*exp(1))^ncol(covMat) * det(covMat), 2 )
# }

# Mutual information (in bits, log base 2) between node(s) 'from' and node set
# 'to' in a Gaussian graphical model, computed from determinants of
# correlation submatrices. Either 'ggm' (a network coercible with getWmat) or
# 'covMat' (a covariance matrix) must be supplied -- not both.
# Returns a named numeric vector, one entry per 'from' node (NA when a 'from'
# node is also contained in 'to').
mutualInformation <- function(
  ggm,        # network object, used unless 'covMat' is given
  from,       # Defaults to all nodes
  to = "all", # Defaults to all nodes but node of interest
  covMat      # alternative input: covariance matrix (mutually exclusive with 'ggm')
){
  if (!missing(covMat)){
    if (!missing(ggm)) stop("If 'covMat' is not missing, 'ggm' must be missing.")
    corMat <- cov2cor(covMat)
  } else {
    net <- getWmat(ggm)
    diag(net) <- 0
    # Implied correlation matrix of a partial correlation network:
    # invert I - W and standardize. Negative eigenvalues mean the weights
    # cannot be a valid partial correlation structure.
    inv <- diag(nrow(net)) - net
    if (any (eigen(inv)$values < 0)) stop("Network is not a valid partial correlation network")
    corMat <- cov2cor(solve(inv))
    colnames(corMat) <- colnames(net)
  }

  if (missing(from)){
    from <- seq_len(ncol(corMat))
  }

  # If from and to are characters, match to labels:
  if (is.character(from)){
    if (!all(from %in% colnames(corMat))) stop("Node names not found in column names of network")
    from <- match(from,colnames(corMat))
  }
  if (is.character(to) && !identical(to,"all")){
    if (!all(to %in% colnames(corMat))) stop("Node names not found in column names of network")
    to <- match(to,colnames(corMat))
  }

  # Mutual information:
  # MI(f; T) = 1/2 * log2( det(S_ff) * det(S_TT) / det(S_{f,T}) )
  res <- sapply(from, function(f){
    # entropy(corMat, f) + entropy(corMat, to) - entropy(corMat, unique(c(from,to)))
    if (identical(to,"all")){
      # "all" means: every node except the 'from' node itself.
      to2 <- seq_len(ncol(corMat))[-f]
    } else {
      to2 <- to
    }
    if (any(f == to2)) return(NA)
    all <- unique(c(f,to2))
    1/2 * log((det(corMat[f,f,drop=FALSE]) * det(corMat[to2,to2,drop=FALSE]) / det(corMat[all,all,drop=FALSE])) ,2)
  })
  names(res) <- colnames(corMat)[from]
  return(res)
}
qgraph/R/makeBW.R0000644000176200001440000000347114430573263013242 0ustar liggesusers
makeBW <- function(x, colorlist = NA, plot = TRUE) {
  # convert a color qgraph in a black and white version
  # x is a qgraph object
  # supports up to 12 different colors for the nodes
  # colorlist = optional argument, a vector of color to guarantee a precise match
  # between colors and fillers
  # generate 12 combinations of density & angles as colors are in the original qgraph
  density <- c(10, 20, 30)
  angles <- c(0, 45, 90, 135)
  combos <- data.frame(expand.grid(density, angles))
  names(combos) <- c("density", "angle")
  # reorder to pick up the most different ones first
  combos <- combos[c(1, 5, 9, 10, 2, 6, 7, 3, 4, 12, 8, 11), ]
  # make BW NODES
  clr <- x$graphAttributes$Nodes$color
  # If no explicit color list is given, map each distinct node color to a pattern:
  if(all(is.na(colorlist))) {
    unq <- unique(clr)
  } else {
    unq <- colorlist
  }
  # count how many different colors are there
  N <- length(unq)
  # NOTE(review): the check allows at most 12 colors (only 12 density/angle
  # combinations exist) but the error message says "up to 14 colors" -- the
  # message text looks wrong; confirm and align with the check.
  if(N > 12) stop("Too many colors: Black & White qgraphs can be plotted with up to 14 colors")
  # define the new combinations of density / angle for each node
  newcol <- data.frame(matrix(ncol= 2, nrow = length(clr)))
  names(newcol) <- names(combos)
  for(i in 1:N) {
    newcol[clr == unq[i],] <- combos[i,]
  }
  x$graphAttributes$Nodes$density <- as.numeric(newcol$density)
  x$graphAttributes$Nodes$angles <- as.numeric(newcol$angle)
  # Hatched fills need visible borders and standard polygon drawing:
  x$graphAttributes$Nodes$borders <- rep(TRUE, nrow(newcol))
  x$graphAttributes$Nodes$border.width <- rep(1, nrow(newcol))
  x$plotOptions$usePCH <- FALSE
  # lblcol <- newcol$color
  # lblcol[newcol$color != "white"] <- "white"
  # lblcol[newcol$color == "white"] <- "black"
  # x$graphAttributes$Nodes$label.color <- lblcol
  # make BW EDGES
  # lty 1 (solid) for non-negative weights, lty 2 (dashed) for negative ones:
  x$graphAttributes$Edges$lty <- 2-as.numeric(x$Edgelist$weight >= 0)
  # plot and return the qgraph object
  if(plot) plot(x)
  invisible(x)
}
qgraph/R/00PolyShapes.R0000644000176200001440000005771714430573263014327 0ustar liggesusers
### POLYGON SHAPES ###
# Pre-computed (x, y) outline coordinates for the "crown" node shape:
CROWNPOLY <- structure(list(x = c(0, 0.0526315789473684, 0.105263157894737, 0.157894736842105, 0.210526315789474, 0.263157894736842, 0.315789473684211, 0.368421052631579, 0.421052631578947, 0.473684210526316, 0.526315789473684, 0.578947368421053, 0.631578947368421, 0.684210526315789, 0.736842105263158, 0.789473684210526, 0.842105263157895, 0.894736842105263, 0.947368421052632, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.973684210526316, 0.947368421052632,
0.921052631578947, 0.894736842105263, 0.868421052631579, 0.842105263157895, 0.815789473684211, 0.789473684210526, 0.763157894736842, 0.736842105263158, 0.710526315789474, 0.684210526315789, 0.657894736842105, 0.631578947368421, 0.605263157894737, 0.578947368421053, 0.552631578947368, 0.526315789473684, 0.5, 0.5, 0.473684210526316, 0.447368421052632, 0.421052631578947, 0.394736842105263, 0.368421052631579, 0.342105263157895, 0.315789473684211, 0.289473684210526, 0.263157894736842, 0.236842105263158, 0.210526315789474, 0.184210526315789, 0.157894736842105, 0.131578947368421, 0.105263157894737, 0.0789473684210527, 0.0526315789473685, 0.0263157894736842, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.0263157894736842, -0.0526315789473684, -0.0789473684210526, -0.105263157894737, -0.131578947368421, -0.157894736842105, -0.184210526315789, -0.210526315789474, -0.236842105263158, -0.263157894736842, -0.289473684210526, -0.315789473684211, -0.342105263157895, -0.368421052631579, -0.394736842105263, -0.421052631578947, -0.447368421052632, -0.473684210526316, -0.5, -0.5, -0.526315789473684, -0.552631578947368, -0.578947368421053, -0.605263157894737, -0.631578947368421, -0.657894736842105, -0.684210526315789, -0.710526315789474, -0.736842105263158, -0.763157894736842, -0.789473684210526, -0.815789473684211, -0.842105263157895, -0.868421052631579, -0.894736842105263, -0.921052631578947, -0.947368421052632, -0.973684210526316, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -0.947368421052632, -0.894736842105263, -0.842105263157895, -0.789473684210526, -0.736842105263158, -0.684210526315789, -0.631578947368421, -0.578947368421053, -0.526315789473684, -0.473684210526316, -0.421052631578947, -0.368421052631579, -0.315789473684211, -0.263157894736842, -0.210526315789474, -0.157894736842105, -0.105263157894737, -0.0526315789473685, 0), y = c(-0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, 
-0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.447368421052632, -0.394736842105263, -0.342105263157895, -0.289473684210526, -0.236842105263158, -0.184210526315789, -0.131578947368421, -0.0789473684210527, -0.0263157894736842, 0.0263157894736842, 0.0789473684210527, 0.131578947368421, 0.184210526315789, 0.236842105263158, 0.289473684210526, 0.342105263157895, 0.394736842105263, 0.447368421052632, 0.5, 0.5, 0.473684210526316, 0.447368421052632, 0.421052631578947, 0.394736842105263, 0.368421052631579, 0.342105263157895, 0.315789473684211, 0.289473684210526, 0.263157894736842, 0.236842105263158, 0.210526315789474, 0.184210526315789, 0.157894736842105, 0.131578947368421, 0.105263157894737, 0.0789473684210527, 0.0526315789473685, 0.0263157894736842, 0, 0, 0.0263157894736842, 0.0526315789473684, 0.0789473684210526, 0.105263157894737, 0.131578947368421, 0.157894736842105, 0.184210526315789, 0.210526315789474, 0.236842105263158, 0.263157894736842, 0.289473684210526, 0.315789473684211, 0.342105263157895, 0.368421052631579, 0.394736842105263, 0.421052631578947, 0.447368421052632, 0.473684210526316, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.473684210526316, 0.447368421052632, 0.421052631578947, 0.394736842105263, 0.368421052631579, 0.342105263157895, 0.315789473684211, 0.289473684210526, 0.263157894736842, 0.236842105263158, 0.210526315789474, 0.184210526315789, 0.157894736842105, 0.131578947368421, 0.105263157894737, 0.0789473684210527, 0.0526315789473685, 0.0263157894736842, 0, 0, 0.0263157894736842, 0.0526315789473684, 0.0789473684210526, 0.105263157894737, 0.131578947368421, 0.157894736842105, 0.184210526315789, 0.210526315789474, 0.236842105263158, 0.263157894736842, 0.289473684210526, 0.315789473684211, 0.342105263157895, 0.368421052631579, 0.394736842105263, 0.421052631578947, 0.447368421052632, 0.473684210526316, 0.5, 0.5, 0.447368421052632, 0.394736842105263, 0.342105263157895, 
0.289473684210526, 0.236842105263158, 0.184210526315789, 0.131578947368421, 0.0789473684210527, 0.0263157894736842, -0.0263157894736842, -0.0789473684210527, -0.131578947368421, -0.184210526315789, -0.236842105263158, -0.289473684210526, -0.342105263157895, -0.394736842105263, -0.447368421052632, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5)), .Names = c("x", "y"), row.names = c(NA, -180L ), class = "data.frame") ELLIPSEPOLY <- list( x = sin(seq(0, 2*pi, length = 200)), y = cos(seq(0, 2*pi, length = 200)) ) HEARTPOLY <- structure(list(x = c(0.000524613718038136, 0.00152037576817299, 0.00837191368505907, 0.0263525265968505, 0.0596231845491932, 0.110803218240353, 0.180680475594751, 0.268087394223726, 0.369954594919716, 0.481537715428025, 0.596797695163824, 0.70890097596103, 0.810795329588381, 0.895810220726289, 0.95822837243697, 0.993777720620083, 1, 0.976463167095426, 0.924798756665914, 0.848560847051974, 0.752919185599284, 0.644213780440664, 0.529410581862747, 0.415506655649061, 0.308937708512294, 0.21504056435074, 0.137618233447556, 0.0786460081162648, 0.0381443807024719, 0.0142296425596522, 0.0033371202850081, 0.000596558943702696, 0.000325551274901859, -0.00340364703380058, -0.0161751396581412, -0.0426714553619016, -0.0861970864784513, -0.148326642404516, -0.22870961369511, -0.325049731954249, -0.433261288308903, -0.547788934599377, -0.662062851652114, -0.769049036210921, -0.861845916483694, -0.934274319260742, -0.981408353675419, -1, -0.988759630359335, -0.948467499354436, -0.881906282519685, -0.79362066640824, -0.689525383893214, -0.576396567854278, -0.461291662282501, -0.350949456597994, -0.251223532033918, -0.166599374905229, -0.0998378919663984, -0.0517767300327744, -0.0213066683594325, -0.00552468083396929, -4.94560549954004e-05), y = c(0.520897709618179, 0.534947678325485, 0.575452559134198, 0.637673315472304, 0.714338676813725, 0.796497437137179, 0.87454802681761, 
0.939310336192077, 0.98300223008988, 1, 0.987295403433758, 0.944606765832239, 0.874151080567606, 0.78013077798753, 0.66802603706674, 0.543805951388826, 0.413176650345883, 0.28097147888861, 0.15076013428112, 0.024715092692505, -0.0962689496523439, -0.212246769666327, -0.323760566618399, -0.43133676232965, -0.535035885584381, -0.634135831892105, -0.727002022607152, -0.811163164401614, -0.883574411157736, -0.941016958598965, -0.980559871117998, -1, -0.998200691457891, -0.975268605096653, -0.932537068881262, -0.872358940152541, -0.797745974695624, -0.711919363179301, -0.617852572062957, -0.517889982248489, -0.413512473049402, -0.305295947111203, -0.193074914032843, -0.0762862599001901, 0.045565535450107, 0.172405369550093, 0.303141763887568, 0.435369118261749, 0.565288730801492, 0.687886497384175, 0.79736440992733, 0.887780569712151, 0.953815194758073, 0.99155416691318, 0.999171547693052, 0.977400335131055, 0.929705927528264, 0.86211598567256, 0.782708083545859, 0.700805731636508, 0.625976704926773, 0.566958524171822, 0.530649611127533 )), .Names = c("x", "y")) STARPOLY = structure(list(x = c(0, 0.0123744263640521, 0.0247488527281041, 0.0371232790921562, 0.0494977054562083, 0.0618721318202603, 0.0742465581843124, 0.0866209845483645, 0.0989954109124165, 0.111369837276469, 0.123744263640521, 0.136118690004573, 0.148493116368625, 0.160867542732677, 0.173241969096729, 0.185616395460781, 0.197990821824833, 0.210365248188885, 0.222739674552937, 0.235114100916989, 0.235114100916989, 0.272795280673735, 0.31047646043048, 0.348157640187226, 0.385838819943971, 0.423519999700717, 0.461201179457462, 0.498882359214208, 0.536563538970953, 0.574244718727699, 0.611925898484444, 0.64960707824119, 0.687288257997935, 0.724969437754681, 0.762650617511426, 0.800331797268172, 0.838012977024917, 0.875694156781663, 0.913375336538408, 0.951056516295154, 0.951056516295154, 0.921023152622675, 0.890989788950196, 0.860956425277718, 0.830923061605239, 0.800889697932761, 0.770856334260282, 
0.740822970587804, 0.710789606915325, 0.680756243242847, 0.650722879570368, 0.62068951589789, 0.590656152225411, 0.560622788552933, 0.530589424880454, 0.500556061207976, 0.470522697535497, 0.440489333863019, 0.41045597019054, 0.380422606518061, 0.380422606518061, 0.391336429979873, 0.402250253441684, 0.413164076903495, 0.424077900365306, 0.434991723827117, 0.445905547288928, 0.45681937075074, 0.467733194212551, 0.478647017674362, 0.489560841136173, 0.500474664597984, 0.511388488059795, 0.522302311521606, 0.533216134983417, 0.544129958445229, 0.55504378190704, 0.565957605368851, 0.576871428830662, 0.587785252292473, 0.587785252292473, 0.556849186382343, 0.525913120472213, 0.494977054562083, 0.464040988651953, 0.433104922741822, 0.402168856831692, 0.371232790921562, 0.340296725011432, 0.309360659101302, 0.278424593191172, 0.247488527281041, 0.216552461370911, 0.185616395460781, 0.154680329550651, 0.123744263640521, 0.0928081977303905, 0.0618721318202603, 0.0309360659101302, 4.89858719658941e-17, 4.89858719658941e-17, -0.0309360659101301, -0.0618721318202603, -0.0928081977303904, -0.123744263640521, -0.154680329550651, -0.185616395460781, -0.216552461370911, -0.247488527281041, -0.278424593191171, -0.309360659101302, -0.340296725011432, -0.371232790921562, -0.402168856831692, -0.433104922741822, -0.464040988651952, -0.494977054562083, -0.525913120472213, -0.556849186382343, -0.587785252292473, -0.587785252292473, -0.576871428830662, -0.565957605368851, -0.55504378190704, -0.544129958445228, -0.533216134983417, -0.522302311521606, -0.511388488059795, -0.500474664597984, -0.489560841136173, -0.478647017674362, -0.46773319421255, -0.456819370750739, -0.445905547288928, -0.434991723827117, -0.424077900365306, -0.413164076903495, -0.402250253441684, -0.391336429979873, -0.380422606518061, -0.380422606518061, -0.41045597019054, -0.440489333863019, -0.470522697535497, -0.500556061207976, -0.530589424880454, -0.560622788552933, -0.590656152225411, -0.62068951589789, 
-0.650722879570368, -0.680756243242847, -0.710789606915325, -0.740822970587804, -0.770856334260283, -0.800889697932761, -0.83092306160524, -0.860956425277718, -0.890989788950197, -0.921023152622675, -0.951056516295154, -0.951056516295154, -0.913375336538408, -0.875694156781663, -0.838012977024917, -0.800331797268172, -0.762650617511426, -0.724969437754681, -0.687288257997935, -0.64960707824119, -0.611925898484444, -0.574244718727699, -0.536563538970953, -0.498882359214208, -0.461201179457462, -0.423519999700717, -0.385838819943971, -0.348157640187226, -0.31047646043048, -0.272795280673735, -0.235114100916989, -0.235114100916989, -0.222739674552937, -0.210365248188885, -0.197990821824833, -0.185616395460781, -0.173241969096729, -0.160867542732677, -0.148493116368625, -0.136118690004573, -0.123744263640521, -0.111369837276469, -0.0989954109124166, -0.0866209845483645, -0.0742465581843124, -0.0618721318202604, -0.0494977054562083, -0.0371232790921562, -0.0247488527281041, -0.0123744263640521, 0), y = c(1, 0.964400357776315, 0.928800715552629, 0.893201073328944, 0.857601431105259, 0.822001788881573, 0.786402146657888, 0.750802504434203, 0.715202862210518, 0.679603219986832, 0.644003577763147, 0.608403935539461, 0.572804293315776, 0.537204651092091, 0.501605008868406, 0.46600536664472, 0.430405724421035, 0.39480608219735, 0.359206439973664, 0.323606797749979, 0.323606797749979, 0.322838913361819, 0.32207102897366, 0.3213031445855, 0.320535260197341, 0.319767375809181, 0.318999491421022, 0.318231607032862, 0.317463722644703, 0.316695838256543, 0.315927953868383, 0.315160069480224, 0.314392185092064, 0.313624300703905, 0.312856416315745, 0.312088531927586, 0.311320647539426, 0.310552763151267, 0.309784878763107, 0.309016994374947, 0.309016994374947, 0.286247321105215, 0.263477647835481, 0.240707974565749, 0.217938301296016, 0.195168628026283, 0.17239895475655, 0.149629281486817, 0.126859608217084, 0.104089934947351, 0.0813202616776177, 0.0585505884078848, 
0.0357809151381518, 0.0130112418684188, -0.0097584314013141, -0.0325281046710471, -0.0552977779407801, -0.078067451210513, -0.100837124480246, -0.123606797749979, -0.123606797749979, -0.159681018624977, -0.195755239499976, -0.231829460374974, -0.267903681249972, -0.303977902124971, -0.340052122999969, -0.376126343874967, -0.412200564749966, -0.448274785624964, -0.484349006499962, -0.520423227374961, -0.556497448249959, -0.592571669124957, -0.628645889999956, -0.664720110874954, -0.700794331749952, -0.736868552624951, -0.772942773499949, -0.809016994374947, -0.809016994374947, -0.787489784144687, -0.765962573914427, -0.744435363684166, -0.722908153453906, -0.701380943223645, -0.679853732993385, -0.658326522763125, -0.636799312532864, -0.615272102302604, -0.593744892072343, -0.572217681842083, -0.550690471611823, -0.529163261381562, -0.507636051151302, -0.486108840921042, -0.464581630690781, -0.443054420460521, -0.42152721023026, -0.4, -0.4, -0.42152721023026, -0.443054420460521, -0.464581630690781, -0.486108840921042, -0.507636051151302, -0.529163261381562, -0.550690471611823, -0.572217681842083, -0.593744892072344, -0.615272102302604, -0.636799312532864, -0.658326522763125, -0.679853732993385, -0.701380943223646, -0.722908153453906, -0.744435363684166, -0.765962573914427, -0.787489784144687, -0.809016994374948, -0.809016994374948, -0.772942773499949, -0.736868552624951, -0.700794331749953, -0.664720110874954, -0.628645889999956, -0.592571669124958, -0.556497448249959, -0.520423227374961, -0.484349006499962, -0.448274785624964, -0.412200564749966, -0.376126343874967, -0.340052122999969, -0.303977902124971, -0.267903681249972, -0.231829460374974, -0.195755239499976, -0.159681018624977, -0.123606797749979, -0.123606797749979, -0.100837124480246, -0.0780674512105131, -0.0552977779407802, -0.0325281046710472, -0.00975843140131423, 0.0130112418684187, 0.0357809151381517, 0.0585505884078846, 0.0813202616776176, 0.104089934947351, 0.126859608217084, 0.149629281486816, 
0.172398954756549, 0.195168628026282, 0.217938301296015, 0.240707974565748, 0.263477647835481, 0.286247321105214, 0.309016994374947, 0.309016994374947, 0.309784878763107, 0.310552763151266, 0.311320647539426, 0.312088531927586, 0.312856416315745, 0.313624300703905, 0.314392185092064, 0.315160069480224, 0.315927953868383, 0.316695838256543, 0.317463722644702, 0.318231607032862, 0.318999491421022, 0.319767375809181, 0.320535260197341, 0.3213031445855, 0.32207102897366, 0.322838913361819, 0.323606797749979, 0.323606797749979, 0.359206439973664, 0.39480608219735, 0.430405724421035, 0.46600536664472, 0.501605008868406, 0.537204651092091, 0.572804293315776, 0.608403935539461, 0.644003577763147, 0.679603219986832, 0.715202862210517, 0.750802504434203, 0.786402146657888, 0.822001788881573, 0.857601431105259, 0.893201073328944, 0.928800715552629, 0.964400357776315, 1)), .Names = c("x", "y"))qgraph/R/palettes.R0000644000176200001440000000155414430573263013715 0ustar liggesuserscolorblind <- function(n, shift = 0){ # Taken from: http://jfly.iam.u-tokyo.ac.jp/color/ if (n > 7) warning("'colorblind' palette only supports 8 colors.") Palette <- rgb( c(230,86,0,240,204,213,0), c(159,180,158,228,121,94,114), c(0,233,115,66,167,0,178), maxColorValue=255) Palette[(((shift + 1:n)-1)%%8)+1] } # ggplot theme: ggplot_palette <- function(n) { hues = seq(15, 375, length = n + 1) hcl(h = hues, l = 65, c = 100)[1:n] } # Fiftyshades: shadesOfGrey <- colorRampPalette(c("grey0", "grey100")) neon <- function(n, shift = 0){ # Taken from: http://jfly.iam.u-tokyo.ac.jp/color/ if (n > 6) warning("'colorblind' palette only supports 8 colors.") Palette <- c( "#ff3f3f", "#99FC20", "#ff48c4", "#f3ea5f", "#c04df9", "#2bd1fc" ) Palette[(((shift + 1:n)-1)%%8)+1] } qgraph/R/glasso_methods.R0000644000176200001440000002301614430573263015104 0ustar liggesusers# Selects optimal lamba based on EBIC for given covariance matrix. # EBIC is computed as in Foygel, R., & Drton, M. (2010, November). 
Extended Bayesian Information Criteria for Gaussian Graphical Models. In NIPS (pp. 604-612). Chicago # Computes partial correlation matrix given precision matrix: wi2net <- function(x) { x <- -cov2cor(x) diag(x) <- 0 x <- forceSymmetric(x) return(x) } # Computes optimal glasso network based on EBIC: EBICglassoCore <- function( S, # Sample covariance matrix n, # Sample size gamma = 0.5, penalize.diagonal = FALSE, # Penalize diagonal? nlambda = 100, lambda.min.ratio = 0.01, returnAllResults = FALSE, # If true, returns a list checkPD = TRUE, # Checks if matrix is positive definite and stops if not penalizeMatrix, # Optional logical matrix to indicate which elements are penalized countDiagonal = FALSE, # Set to TRUE to get old qgraph behavior: conting diagonal elements as parameters in EBIC computation. This is not correct, but is included to replicate older analyses refit = TRUE, # If TRUE, network structure is taken and non-penalized version is computed. ebicMethod = c("new","old"), regularized = TRUE, threshold = FALSE, verbose = TRUE, criterion = "ebic", ... # glasso arguments ) { ebicMethod <- match.arg(ebicMethod) if (checkPD){ if (any(eigen(S)$values < 0)) stop("'S' is not positive definite") } # Standardize cov matrix: S <- cov2cor(S) # Compute lambda sequence (code taken from huge package): lambda.max = max(max(S - diag(nrow(S))), -min(S - diag(nrow(S)))) lambda.min = lambda.min.ratio*lambda.max lambda = exp(seq(log(lambda.min), log(lambda.max), length = nlambda)) # Run glasso path: if (missing(penalizeMatrix)){ glas_path <- glassopath(S, lambda, trace = 0, penalize.diagonal=penalize.diagonal, ...) }else{ glas_path <- list( w = array(0, c(ncol(S), ncol(S), length(lambda))), wi = array(0, c(ncol(S), ncol(S), length(lambda))), rholist = lambda ) for (i in 1:nlambda){ res <- glasso(S, penalizeMatrix * lambda[i], trace = 0, penalize.diagonal=penalize.diagonal, ...) 
glas_path$w[,,i] <- res$w glas_path$wi[,,i] <- res$wi } } # Threshold: if (threshold){ for (i in 1:nlambda){ # Degree: p <- ncol(glas_path$wi[,,i]) # D <- max(centrality(ifelse( glas_path$wi[,,i] != 0,1, 0))$OutDegree) thresh <- (log(p*(p-1)/2)) / sqrt(n) glas_path$wi[,,i] <- ifelse(abs(glas_path$wi[,,i]) < thresh,0,glas_path$wi[,,i]) } } # Compute EBICs: if (ebicMethod == "old"){ if (criterion != "ebic") stop("criterion must be 'ebic' when ebicMethod = 'old'") EBICs <- sapply(seq_along(lambda),function(i){ if (!regularized){ invSigma <- ggmFit(wi2net(glas_path$wi[,,i]), S, sampleSize = n, ebicTuning = gamma, refit = TRUE,verbose = FALSE)$invSigma } else { invSigma <- glas_path$wi[,,i] } EBIC(S, invSigma, n, gamma, countDiagonal=countDiagonal) }) } else { EBICs <- sapply(seq_along(lambda),function(i){ fit <- ggmFit(wi2net(glas_path$wi[,,i]), S, n, ebicTuning = gamma,refit = !regularized, verbose = FALSE) # print(fit$fitMeasures$ebic) # browser() fit$fitMeasures[[criterion]] }) } # lik <- sapply(seq_along(lambda),function(i){ # logGaus(S, glas_path$wi[,,i], n) # }) # # EBICs <- sapply(seq_along(lambda),function(i){ # EBIC(S, glas_path$wi[,,i], n, gamma, countDiagonal=countDiagonal) # }) # EBIC via lavaan codes: # EBICs <- sapply(seq_along(lambda),function(i){ # fit <- ggmFit(wi2net(glas_path$wi[,,i]), S, n, ebicTuning = gamma) # print(fit$fitMeasures$ebic) # # browser() # fit$fitMeasures$ebic # }) # Smallest EBIC: opt <- which.min(EBICs) # Check if rho is smallest: if (opt == 1){ message("Note: Network with lowest lambda selected as best network: assumption of sparsity might be violated.") } # Return network: net <- as.matrix(forceSymmetric(wi2net(glas_path$wi[,,opt]))) colnames(net) <- rownames(net) <- colnames(S) # Check empty network: if (all(net == 0)){ message("An empty network was selected to be the best fitting network. Possibly set 'lambda.min.ratio' higher to search more sparse networks. 
You can also change the 'gamma' parameter to improve sensitivity (at the cost of specificity).") } # Refit network: # Refit: if (refit){ if (verbose) message("Refitting network without LASSO regularization") if (!all(net[upper.tri(net)]!=0)){ glassoRes <- suppressWarnings(glasso::glasso(S, 0, zero = which(net == 0 & upper.tri(net), arr.ind=TRUE), trace = 0, penalize.diagonal=penalize.diagonal, ...)) } else { glassoRes <- suppressWarnings(glasso::glasso(S, 0, trace = 0, penalize.diagonal=penalize.diagonal, ...)) } net <- as.matrix(forceSymmetric(wi2net(glassoRes$wi))) colnames(net) <- rownames(net) <- colnames(S) optwi <- glassoRes$wi } else { optwi <- glas_path$wi[,,opt] } # If regularized and low lambda was selected, give warning: if (regularized && lambda[opt] < 0.1 * lambda.max && !isTRUE(threshold)){ warning("A dense regularized network was selected (lambda < 0.1 * lambda.max). Recent work indicates a possible drop in specificity. Interpret the presence of the smallest edges with care. Setting threshold = TRUE will enforce higher specificity, at the cost of sensitivity.") } # Return if (returnAllResults){ return(list( results = glas_path, ebic = EBICs, # loglik = lik, optnet = net, lambda = lambda, optwi = optwi )) } else return(net) } # Old function: # Computes optimal glasso network based on EBIC: EBICglasso <- function( S, # Sample covariance matrix n, # Sample size gamma = 0.5, penalize.diagonal = FALSE, # Penalize diagonal? nlambda = 100, lambda.min.ratio = 0.01, returnAllResults = FALSE, # If true, returns a list checkPD = TRUE, # Checks if matrix is positive definite and stops if not penalizeMatrix, # Optional logical matrix to indicate which elements are penalized countDiagonal = FALSE, # Set to TRUE to get old qgraph behavior: conting diagonal elements as parameters in EBIC computation. This is not correct, but is included to replicate older analyses refit = FALSE, # If TRUE, network structure is taken and non-penalized version is computed. 
threshold = FALSE, verbose = TRUE, # ebicMethod = c("new","old"), # ebicRefit = FALSE, ... # glasso arguments ) { EBICglassoCore(S=S, # Sample covariance matrix n=n, # Sample size gamma = gamma, penalize.diagonal = penalize.diagonal, # Penalize diagonal? nlambda = nlambda, lambda.min.ratio = lambda.min.ratio, returnAllResults = returnAllResults, # If true, returns a list checkPD = checkPD, # Checks if matrix is positive definite and stops if not penalizeMatrix = penalizeMatrix, # Optional logical matrix to indicate which elements are penalized countDiagonal = countDiagonal, # Set to TRUE to get old qgraph behavior: conting diagonal elements as parameters in EBIC computation. This is not correct, but is included to replicate older analyses refit = refit, # If TRUE, network structure is taken and non-penalized version is computed. ebicMethod = "old", regularized = TRUE, threshold=threshold, verbose=verbose, ...) } # # # # Computes optimal glasso network based on EBIC: # EBICglasso2 <- function( # S, # Sample covariance matrix # n, # Sample size # gamma = 0.5, # penalize.diagonal = FALSE, # Penalize diagonal? # nlambda = 100, # lambda.min.ratio = 0.01, # returnAllResults = FALSE, # If true, returns a list # checkPD = TRUE, # Checks if matrix is positive definite and stops if not # penalizeMatrix, # Optional logical matrix to indicate which elements are penalized # countDiagonal = FALSE, # Set to TRUE to get old qgraph behavior: conting diagonal elements as parameters in EBIC computation. This is not correct, but is included to replicate older analyses # refit = TRUE, # If TRUE, network structure is taken and non-penalized version is computed. # # ebicMethod = c("new","old"), # # ebicRefit = FALSE, # threshold = FALSE, # ... # glasso arguments # ) { # EBICglassoCore(S=S, # Sample covariance matrix # n=n, # Sample size # gamma = gamma, # penalize.diagonal = penalize.diagonal, # Penalize diagonal? 
# nlambda = nlambda, # lambda.min.ratio = lambda.min.ratio, # returnAllResults = returnAllResults, # If true, returns a list # checkPD = checkPD, # Checks if matrix is positive definite and stops if not # penalizeMatrix = penalizeMatrix, # Optional logical matrix to indicate which elements are penalized # countDiagonal = countDiagonal, # Set to TRUE to get old qgraph behavior: conting diagonal elements as parameters in EBIC computation. This is not correct, but is included to replicate older analyses # refit = refit, # If TRUE, network structure is taken and non-penalized version is computed. # ebicMethod = "new", # regularized = TRUE, # threshold=threshold, # ...) # } qgraph/R/qgraph_loadings.R0000644000176200001440000002076514430573263015243 0ustar liggesusers qgraph.loadings=function( fact, ...) { if (is(fact,"loadings")) fact <- fact[1:nrow(fact),1:ncol(fact)] arguments=list(...) if (length(arguments)>0) { for (i in 1:length(arguments)) { if (is(arguments[[i]],"qgraph") ) { if (!is.null(names(arguments[[i]]))) { for (j in 1:length(arguments[[i]])) { if (!(names(arguments[[i]])[j]%in%names(arguments))) { arguments[length(arguments)+1]=arguments[[i]][j] names(arguments)[length(arguments)]=names(arguments[[i]])[j] } } } } } } if (is.null(rownames(fact))) rownames(fact) <- 1:nrow(fact) # SET DEFAULT ARGUMENTS: if(is.null(arguments$resid)) resid=NULL else resid=arguments$resid if(is.null(arguments$factorCors)) factorCors=NULL else factorCors=arguments$factorCors if(is.null(arguments$residSize)) residSize=0.1 else residSize=arguments$residSize if(is.null(arguments$filetype)) filetype="default" else filetype=arguments$filetype if(is.null(arguments$vsize)) vsize=max((-1/72)*(nrow(fact))+5.35,1) else vsize=arguments$vsize if(is.null(arguments$groups)) groups=NULL else groups=arguments$groups if (is.factor(groups) | is.character(groups)) groups <- tapply(1:length(groups),groups,function(x)x) if(is.null(arguments$color)) color=NULL else color=arguments$color 
if(is.null(arguments$model)) model="none" else model=arguments$model if(is.null(arguments$crossloadings)) crossloadings=FALSE else crossloadings=arguments$crossloadings if(is.null(arguments$labels)) { labels <- TRUE if (nrow(fact) <= 20) { labels <- abbreviate(rownames(fact),3) } } else labels <- arguments$labels if(is.null(arguments$Fname)) Fname=NULL else Fname=arguments$Fname if(is.null(arguments$layout)) layout="circle" else layout=arguments$layout if (layout=="circular") layout <- "circle" if(is.null(arguments$legend)) { if (!is.null(groups) & !is.null(names(groups)) & filetype=="pdf") legend=TRUE else legend=FALSE } else legend=arguments$legend if(is.null(arguments$legend.cex)) legend.cex=1 else legend.cex=arguments$legend.cex # Output arguments: if(is.null(arguments$filetype)) filetype="default" else filetype=arguments$filetype if(is.null(arguments$filename)) filename="qgraph" else filename=arguments$filename if(is.null(arguments$width)) { if (is.null(dev.list()[dev.cur()])) width=10 else width=dev.size(units="in")[1] } else width=arguments$width if(is.null(arguments$height)) { if (is.null(dev.list()[dev.cur()])) { if (layout=="circle") height=10 else height=5 } else height=dev.size(units="in")[2] } else height=arguments$height if(is.null(arguments$pty)) pty='m' else pty=arguments$pty if(is.null(arguments$res)) res=320 else res=arguments$res # Start output: if (filetype=='default') if (is.null(dev.list()[dev.cur()])) dev.new(rescale="fixed",width=width,height=height) if (filetype=='R') dev.new(rescale="fixed",width=width,height=height) if (filetype=='eps') postscript(paste(filename,".eps",sep=""),height=height,width=width, horizontal=FALSE) if (filetype=='pdf') pdf(paste(filename,".pdf",sep=""),height=height,width=width) if (filetype=='tiff') tiff(paste(filename,".tiff",sep=""),units='in',res=res,height=height,width=width) if (filetype=='png') png(paste(filename,".png",sep=""),units='in',res=res,height=height,width=width) if (filetype=='jpg' | 
filetype=='jpeg') jpeg(paste(filename,".jpg",sep=""),units='in',res=res,height=height,width=width) if (filetype=="svg") { stop("filetype = 'svg' is no longer supported") # if (R.Version()$arch=="x64") stop("RSVGTipsDevice is not available for 64bit versions of R.") # require("RSVGTipsDevice") # if (!requireNamespace("RSVGTipsDevice", quietly = TRUE)) stop("Please install 'RSVGTipsDevice' package first.") # RSVGTipsDevice::devSVGTips(paste(filename,".svg",sep=""),width=width,height=height,title=filename) } # Rescale dims: if (pty=='s') { width=height=min(c(width,height)) } # Parameters: n=nrow(fact) k=ncol(fact) names=names(groups) # Max loadings: if (k>1) { maxload=apply(abs(fact),1,which.max) sorted=sort(maxload,index.return=T) sort2=sort(apply(fact,2,which.max),index.return=T)$ix #IDENTIFY GROUPS: if (!is.null(groups) ) { identity=vector("numeric",length(groups)) for (i in 1:length(groups)) { identity[i]=as.numeric(names(which.max(table(maxload[groups[[i]]])))) } if (length(unique(identity))==length(groups)) identified=TRUE else identified=FALSE } else identified=FALSE if (k1 ) identitysort=sort(identity,index=T)$ix # Set labels: Glabels=rep("",n+k) if (!is.logical(labels)) { Glabels[1:n]=labels } else if (is.logical(labels)) { if (labels == TRUE) { Glabels[1:n]=seq(nrow(fact)) } } Glabels[(n+1):(n+k)]=1:k if (!is.null(names) & identified) { for (i in 1:k) { Glabels[n+i]=names[identitysort[i]] } } if (k==1 & !is.null(Fname)) Glabels[n+1]=Fname # Vertex sizes if (length(vsize)==1) vsize=rep(vsize,2) Gvsize=rep(vsize[1],nrow(fact)+ncol(fact)) Gvsize[(n+1):(n+k)]=vsize[2] # Set colors: Gcolor = rep("white",nrow(fact)+ncol(fact)) if (is.null(color) & !is.null(groups)) color=rainbow(length(groups)) if (!is.null(groups)) { for (i in 1:length(groups)) { Gcolor[groups[[i]]]<-color[i] } } if (identified) { for (i in 1:k) { Gcolor[n+i]=color[identitysort[i]] } } # Set layout: if (layout!="circle") { l2=l=matrix(0,ncol=2,nrow=n+k) l2[,2]=c(rep(-1,n),rep(0,k)) 
l2[,1]=c(seq(-1,1,length=n),seq(-1,1,length=k+2)[2:(k+1)]) if (k>1) { if (!identified) { for (i in 1:n) l[i,]=l2[which(sorted$ix==i),] } else { l[unlist(groups[identitysort]),]<-l2[1:n,] } l[(n+1):(n+k),]=l2[(n+1):(n+k),] } else l=l2 } if (layout=="circle") { l2=l=matrix(0,ncol=2,nrow=n+k) tl=n+1 l2[1:n,1]=sin(seq(0,2*pi, length=tl))[-tl] l2[1:n,2]=cos(seq(0,2*pi, length=tl))[-tl] if (k>1) { if (!identified) for (i in 1:n) l[i,]=l2[which(sorted$ix==i),] if (identified) { l[unlist(groups[identitysort]),]<-l2[1:n,] } tl=k+1 l[(n+1):(n+k),1]=0.5*sin(seq(0,2*pi,length=tl)+(1*pi/k))[-tl] l[(n+1):(n+k),2]=0.5*cos(seq(0,2*pi,length=tl)+(1*pi/k))[-tl] } else l[1:n,]=l2[1:n,] } ### Set residuals ### curve <- 0 if (!is.null(resid)) { if (length(resid)!=n) stop("Length of residuals does not correspond to number of factors") m <- rbind(m, cbind( n+k+1:n, 1:n, resid)) Gvsize <- c(Gvsize,rep(0,n)) Gcolor <- c(Gcolor, rep("#00000000",n)) Glabels <- c(Glabels, rep("",n)) shape <- c(shape,rep("circle",n)) directed <- c(directed,rep(TRUE,n)) if (layout!="circle") { l <- rbind(l, l[1:n,]) l[n+k+1:n,2] <- -1 - residSize } else { l <- rbind(l, (1+residSize) * l[1:n,]) } } if (!is.null(factorCors)) { m <- rbind(m, cbind( rep(n+1:k,times=k), rep(n+1:k,each=k), c(factorCors) ) ) m <- m[m[,1] != m[,2],] if (layout!="circle") { if (is.null(resid)) curve <- c( rep(0, n*k ), rep(0.4, k^2 - k)) else curve <- c( rep(0, n*k + n ), rep(0.4, k^2 - k)) } directed <- c(directed,rep(TRUE, k^2-k)) } ### RUN QGRAPH ### # class(arguments)="qgraph" args <- list(input=m,layout=l,vsize=Gvsize,color=Gcolor,labels=Glabels,shape=shape,filetype="",curve=curve, height=height,width=width,legend=F,directed=directed,bidirectional=TRUE) args <- c(args,arguments[!names(arguments) %in% names(args)]) Q <- do.call(qgraph,args) # # Q <- qgraph(m,layout=l,vsize=Gvsize,color=Gcolor,labels=Glabels,shape=shape,filetype="",curve=curve, # height=height,width=width,legend=F,arguments,directed=directed,bidirectional=TRUE) 
Q$filetype <- filetype # Legend: if (legend & filetype=="pdf") { legend.cex=legend.cex*2 plot(1, ann = FALSE, axes = FALSE, xlim = c(-1, 1), ylim = c(-1 ,1 ),type = "n", xaxs = "i", yaxs = "i") legend (0,0, names(groups), col= color ,pch = 19, xjust=0.5, yjust=0.5, cex=legend.cex, bty='n') legend (0,0, names(groups), col= "black" ,pch = 1, xjust=0.5, ,yjust=0.5, cex=legend.cex, bty='n') } else if (legend & filetype!="pdf") warning("Legend in qgraph.loadings only supported for pdf output") if (filetype%in%c('pdf','png','jpg','jpeg','svg','eps','tiff')) dev.off() class(Q) <- "qgraph" invisible(Q) } qgraph/R/mixGraphs.R0000644000176200001440000000466514430573263014044 0ustar liggesusers# Mixes two qgraph graphs on pars: # - layout # - vsize # - esize # - layout # mixGraphs <- function(Graph1, Graph2, mix = 0.5) { # For now, break if edgelists are not identical: if (!identical(Graph1$Edgelist[c('from','to','directed','bidirectional')],Graph2$Edgelist[c('from','to','directed','bidirectional')])) { stop("Graphs must have identical edgelists for mixing") } NewGraph <- Graph2 # Mix weights: NewGraph$Edgelist$weight <- (1-mix) * Graph1$Edgelist$weight + mix * Graph2$Edgelist$weight # Mix Layout: NewGraph$layout <- (1-mix) * Graph1$layout + mix * Graph2$layout # Mix vsize: NewGraph$graphAttributes$Nodes$width <- (1-mix) * Graph1$graphAttributes$Nodes$width + mix * Graph2$graphAttributes$Nodes$width NewGraph$graphAttributes$Nodes$height <- (1-mix) * Graph1$graphAttributes$Nodes$height + mix * Graph2$graphAttributes$Nodes$height # Mix edge color: NewGraph$graphAttributes$Edges$color <- mapply(FUN=Fade,col=Graph2$graphAttributes$Edges$color, alpha=mix, bg = Graph1$graphAttributes$Edges$color) # Mix edge width: NewGraph$graphAttributes$Edges$width <- (1-mix) * Graph1$graphAttributes$Edges$width + mix * Graph2$graphAttributes$Edges$width # loopRotation: # Overwrite looprotation of Graph1 to negative if that is closer: Graph1$graphAttributes$Nodes$loopRotation <- ifelse( 
abs(Graph1$graphAttributes$Nodes$loopRotation - Graph2$graphAttributes$Nodes$loopRotation) < abs(Graph1$graphAttributes$Nodes$loopRotation - 2*pi - Graph2$graphAttributes$Nodes$loopRotation), ifelse( abs(Graph1$graphAttributes$Nodes$loopRotation - Graph2$graphAttributes$Nodes$loopRotation) < abs(Graph1$graphAttributes$Nodes$loopRotation + 2*pi - Graph2$graphAttributes$Nodes$loopRotation), Graph1$graphAttributes$Nodes$loopRotation, Graph1$graphAttributes$Nodes$loopRotation + 2*pi), Graph1$graphAttributes$Nodes$loopRotation - 2*pi) NewGraph$graphAttributes$Nodes$loopRotation <- ((1-mix) * Graph1$graphAttributes$Nodes$loopRotation + mix * Graph2$graphAttributes$Nodes$loopRotation) return(NewGraph) } smoothLayout <- function(x) { } smoothAnimationList <- function(x, smoothing = 5) { newList <- list() for (i in seq_len(length(x)-1)) { newList <- c(newList, list(x[[i]]), lapply(seq(0,1,length=smoothing),mixGraphs,Graph1=x[[i]],Graph2=x[[i+1]])) } newList <- c(newList, list(x[[length(x)]])) return(newList) }qgraph/R/glasso_tests.R0000644000176200001440000001720314430573263014604 0ustar liggesusers# Test version not to be included yet # # Computes optimal glasso network based on EBIC: # EBICglassoCluster <- function( # S, # Sample covariance matrix # n, # Sample size # cluster = matrix(1, ncol(S), ncol(S)), # gamma = 0.5, # penalize.diagonal = FALSE, # Penalize diagonal? # nlambda = 100, # lambda.min.ratio = 0.01, # returnAllResults = FALSE, # If true, returns a list # checkPD = TRUE, # Checks if matrix is positive definite and stops if not # penalizeMatrix, # Optional logical matrix to indicate which elements are penalized # countDiagonal = FALSE, # Set to TRUE to get old qgraph behavior: conting diagonal elements as parameters in EBIC computation. This is not correct, but is included to replicate older analyses # refit = FALSE, # If TRUE, network structure is taken and non-penalized version is computed. 
# ebicMethod = c("old","new"), # regularized = TRUE, # If FALSE: refit all networks # threshold = FALSE, # verbose = TRUE, # nCores = 1, # ... # glasso arguments # ) { # ebicMethod <- match.arg(ebicMethod) # # if (checkPD){ # if (any(eigen(S)$values < 0)) stop("'S' is not positive definite") # } # # # Standardize cov matrix: # S <- cov2cor(S) # # # Compute lambda sequence (code taken from huge package): # lambda.max = max(max(S - diag(nrow(S))), -min(S - diag(nrow(S)))) # lambda.min = lambda.min.ratio*lambda.max # lambda = exp(seq(log(lambda.min), log(lambda.max), length = nlambda)) # # # # Number of clusters: # diag(cluster) <- NA # nCluster <- length(unique(na.omit(c(cluster)))) # clusters <- sort(unique(na.omit(c(cluster)))) # allLambda <- do.call(expand.grid,lapply(clusters,function(x)lambda)) # # # Run glasso path (old codes): # # if (nCluster == 1 & missing(penalizeMatrix)){ # # glas_path <- glassopath(S, lambda, trace = 0, penalize.diagonal=penalize.diagonal, ...) # # nGraphs <- nlambda # # # # # Threshold: # # if (threshold){ # # for (i in 1:nGraphs){ # # # Degree: # # p <- ncol(glas_path$wi[,,i]) # # # D <- max(centrality(ifelse( glas_path$wi[,,i] != 0,1, 0))$OutDegree) # # threshold <- (log(p*(p-1)/2)) / sqrt(n) # # glas_path$wi[,,i] <- ifelse(abs(glas_path$wi[,,i]) < threshold,0,glas_path$wi[,,i]) # # # # } # # } # # # # }else{ # New codes: # nGraphs <- nrow(allLambda) # # # glas_path <- list( # # w = array(0, c(ncol(S), ncol(S), nGraphs)), # # wi = array(0, c(ncol(S), ncol(S), nGraphs)), # # rholist = allLambda # # ) # # # PenalizeMatrix: # if (missing(penalizeMatrix)){ # penalizeMatrix <- matrix(TRUE, ncol(S), ncol(S)) # } # # # Parallel: # if (nCores > 1){ # cl <- parallel::makePSOCKcluster(nCores - 1) # # Export to cluster: # parallel::clusterExport(cl, c("cluster","clusters","allLambda","S","penalizeMatrix","penalize.diagonal","regularized","threshold","countDiagonal","n"), envir = environment()) # } else { # cl <- NULL # } # # # Run the loop" # 
Results <- pblapply(1:nGraphs,function(i){ # # Construct penalty matrix: # lambdaMat <- cluster # for (c in seq_along(clusters)){ # lambdaMat[lambdaMat==clusters[c]] <- allLambda[i,c] # } # diag(lambdaMat) <- 0 # # res <- glasso(S, penalizeMatrix * lambdaMat, trace = 0, penalize.diagonal=penalize.diagonal, ...) # # # # Threshold? # if (threshold){ # p <- ncol(res$wi) # # D <- max(centrality(ifelse( glas_path$wi[,,i] != 0,1, 0))$OutDegree) # threshold <- (log(p*(p-1)/2)) / sqrt(n) # res$wi <- ifelse(abs(res$wi) < threshold,0,res$wi) # } # # # Refit without regularization? # if (!regularized){ # if (!all(res$wi[upper.tri(res$wi)] != 0)){ # res <- suppressWarnings(glasso::glasso(S, 0, zero = which(res$wi == 0 & upper.tri(res$wi), arr.ind=TRUE), trace = 0, penalize.diagonal=penalize.diagonal, ...)) # } else { # res <- suppressWarnings(glasso::glasso(S, 0, trace = 0, penalize.diagonal=penalize.diagonal, ...)) # } # } # # # Compute EBIC: # if (ebicMethod == "old"){ # res$EBIC <- EBIC(S, res$wi, n, gamma, countDiagonal=countDiagonal) # } else { # fit <- ggmFit(invSigma = res$wi, covMat = S, sampleSize = n, ebicTuning = gamma,refit = FALSE, verbose = FALSE) # res$EBIC <- fit$fitMeasures$ebic # } # # # return(res) # # glas_path$w[,,i] <- res$w # # glas_path$wi[,,i] <- res$wi # }, cl = cl) # # # # # Combine to path: # # glas_path <- list( # # w = do.call(abind::abind,c(lapply(Results,"[[","w"),list(along=3))), # # wi = do.call(abind::abind,c(lapply(Results,"[[","w"),list(along=3))) # # ) # # } # # # # # # Compute EBICs: # # if (ebicMethod == "old"){ # # EBICs <- sapply(1:nGraphs,function(i){ # # # if (ebicRefit){ # # # invSigma <- ggmFit(wi2net(glas_path$wi[,,i]), S, sampleSize = n, ebicTuning = gamma, refit = TRUE,verbose = FALSE)$invSigma # # # } else { # # # invSigma <- glas_path$wi[,,i] # # # } # # invSigma <- glas_path$wi[,,i] # # EBIC(S, invSigma, n, gamma, countDiagonal=countDiagonal) # # }) # # } else { # # EBICs <- sapply(1:nGraphs,function(i){ # # fit <- 
ggmFit(wi2net(glas_path$wi[,,i]), S, n, ebicTuning = gamma,refit = FALSE, verbose = FALSE) # # # print(fit$fitMeasures$ebic) # # # browser() # # fit$fitMeasures$ebic # # }) # # } # # # # lik <- sapply(seq_along(lambda),function(i){ # # logGaus(S, glas_path$wi[,,i], n) # # }) # # # # EBICs <- sapply(seq_along(lambda),function(i){ # # EBIC(S, glas_path$wi[,,i], n, gamma, countDiagonal=countDiagonal) # # }) # # # EBIC via lavaan codes: # # EBICs <- sapply(seq_along(lambda),function(i){ # # fit <- ggmFit(wi2net(glas_path$wi[,,i]), S, n, ebicTuning = gamma) # # print(fit$fitMeasures$ebic) # # # browser() # # fit$fitMeasures$ebic # # }) # # # # Smallest EBIC: # EBICs <- sapply(Results,'[[','EBIC') # opt <- which.min(EBICs) # # # Check if rho is smallest: # # if (opt == 1 && verbose){ # # # warning("Network with lowest lambda selected as best network. Try setting 'lambda.min.ratio' lower.") # # warning("Network with lowest lambda selected as best network.") # # } # # # Return network: # net <- as.matrix(forceSymmetric(wi2net(Results[[opt]]$wi))) # colnames(net) <- rownames(net) <- colnames(S) # # # Check empty network: # # if (all(net == 0) && verbose){ # # message("An empty network was selected to be the best fitting network. Possibly set 'lambda.min.ratio' higher to search more sparse networks. 
You can also change the 'gamma' parameter to improve sensitivity (at the cost of specificity).") # # } # # # Refit network: # # Refit: # if (refit && regularized){ # if (verbose) message("Refitting network without LASSO regularization") # glassoRes <- suppressWarnings(glasso::glasso(S, 0, zero = which(net == 0 & upper.tri(net), arr.ind=TRUE), trace = 0, penalize.diagonal=penalize.diagonal, ...)) # net <- as.matrix(forceSymmetric(wi2net(glassoRes$wi))) # colnames(net) <- rownames(net) <- colnames(S) # optwi <- glassoRes$wi # } else { # optwi <- Results[[opt]]$wi # } # # # Return # if (returnAllResults){ # return(list( # results = Results, # ebic = EBICs, # # loglik = lik, # optnet = net, # lambda = allLambda, # optwi = optwi # )) # } else return(net) # }qgraph/R/Fade.R0000644000176200001440000000126314430573263012730 0ustar liggesusersFade <- function(col,alpha,bg) { # col = color to fade # bg = color to fade to # alpha = inverse transparency, 1 = fully visable, 0 = fully transparant. if (missing(bg)) bg <- par("bg") if (length(bg)!=1) stop("'bg' must be of length 1") if (length(alpha)==1) alpha <- rep(alpha,length(col)) if (length(col)==1) col <- rep(col,length(alpha)) if (length(col)!=length(alpha)) stop("Length of 'col' not equal to length of 'alpha'") n <- length(col) rgbCols <- col2rgb(col)/255 rgbBG <- col2rgb(bg)/255 colAlpha <- col2rgb(col,alpha=TRUE)[4,]/255 Mix <- rgbCols*rep(alpha,each=3) + rgbBG%*%t(1-alpha) return(rgb(Mix[1,],Mix[2,],Mix[3,],colAlpha)) }qgraph/R/IntInNode.r0000644000176200001440000000611614430573263013762 0ustar liggesusersIntInNode <- function(layout,cex,cex2,shape,m,width=0.2,triangles=TRUE,col="black",side=1,inside=TRUE, flip=FALSE) { N <- nrow(layout) if (length(cex)==1) cex <- rep(cex,N) if (length(cex2)==1) cex2 <- rep(cex2,N) if (length(shape)==1) shape <- rep(shape,N) if (length(col)==1) col <- rep(col,N) if (length(side)==1) side <- rep(side,N) # m is vector of margins to plot lines, NA indicates no line # side: 1. bottom, 2. 
IntInNode <- function(layout, cex, cex2, shape, m, width = 0.2, triangles = TRUE,
                      col = "black", side = 1, inside = TRUE, flip = FALSE) {
  # Draw threshold tick marks on (or next to) nodes.
  #
  # layout    : n x 2 matrix of node coordinates.
  # cex, cex2 : node size parameters (recycled to n).
  # shape     : node shape(s) (recycled to n).
  # m         : list with one numeric vector of thresholds (in [0, 1]) per
  #             node; an NA entry means "no ticks for this node".
  # width     : half-length of each tick, as a fraction of the node radius.
  # triangles : currently unused (triangle markers are commented out below).
  # col, side : tick colour and node side (1 bottom, 2 left, 3 top, 4 right),
  #             both recycled to n.
  # inside    : if TRUE ticks are drawn inside the node, filling from top to
  #             bottom; if FALSE they are drawn on the node's edge.
  # flip      : per-node flag (recycled to length(m)); only used for
  #             inside ticks on side 1, where it swaps the tick orientation.
  #
  # Called for its side effect of drawing with graphics::lines(); returns NULL.
  nNodes <- nrow(layout)
  if (length(cex) == 1) cex <- rep(cex, nNodes)
  if (length(cex2) == 1) cex2 <- rep(cex2, nNodes)
  if (length(shape) == 1) shape <- rep(shape, nNodes)
  if (length(col) == 1) col <- rep(col, nNodes)
  if (length(side) == 1) side <- rep(side, nNodes)
  flip <- rep(flip, length = length(m))

  # Tick primitives: a vertical tick varies in y around midY at x = atX;
  # a horizontal tick varies in x around midX at y = atY.
  vTick <- function(atX, midY, half, colour) {
    lines(c(atX, atX), c(midY - half, midY + half), col = colour)
  }
  hTick <- function(midX, atY, half, colour) {
    lines(c(midX - half, midX + half), c(atY, atY), col = colour)
  }

  for (i in seq_along(m)) {
    if (is.na(m[i])) next

    x <- layout[i, 1]
    y <- layout[i, 2]
    # Horizontal / vertical half-extent of the node, from centre to edge.
    # NOTE(review): relies on Cent2Edge's angle convention (pi/2 = horizontal
    # extent, 0 = vertical extent) — defined elsewhere in the package.
    xran <- Cent2Edge(x, y, pi / 2, cex[i], cex2[i], shape[i])[1] - x
    yran <- Cent2Edge(x, y, 0, cex[i], cex2[i], shape[i])[2] - y

    # seq_along (not 1:length) so an empty threshold vector draws nothing.
    for (j in seq_along(m[[i]])) {
      # Position of threshold j along the node's width / height.
      tickX <- x - xran + m[[i]][j] * xran * 2
      tickY <- y - yran + m[[i]][j] * yran * 2

      if (!inside) {
        # Ticks straddle the node's edge on the requested side.
        if (side[i] == 1) {
          vTick(tickX, y - yran, width * yran, col[i])
        } else if (side[i] == 2) {
          hTick(x - xran, tickY, width * xran, col[i])
        } else if (side[i] == 3) {
          vTick(tickX, y + yran, width * yran, col[i])
        } else if (side[i] == 4) {
          hTick(x + xran, tickY, width * xran, col[i])
        }
      } else {
        # Ticks are centred on the node itself.
        if (side[i] == 1) {
          # BUG FIX: the original tested `if (!flip)` on the whole recycled
          # vector — an error in R >= 4.2 and silently first-element-only
          # before that. Use this node's flag.
          if (!flip[i]) {
            vTick(tickX, y, width * yran, col[i])
          } else {
            hTick(tickX, y, width * xran, col[i])
          }
        } else if (side[i] == 2) {
          hTick(x, tickY, width * xran, col[i])
        } else if (side[i] == 3) {
          vTick(tickX, y, width * yran, col[i])
        } else if (side[i] == 4) {
          hTick(x, tickY, width * xran, col[i])
        }
      }
    }
  }
  # if (triangles)
  # {
  #   points(x,y-yran+m[[i]][j]*yran*2,pch=17,cex=cex[1]/10,col=col[i])
  # }
}
*R/isColor.R 9381e68cc35982cac716f57142d4e096 *R/layout.R ac3f50809c6f9b9d58dc9b4856229404 *R/logGaus.R 14456d7dbd5ed8aa099d3424793e3be7 *R/makeBW.R 8425ba9088a0823fed814833538cf803 *R/mapusr2in.R b6d3fe391415047940b5497b602d1733 *R/mixCols.R 9160b3e7974658f6de3078abb516cf77 *R/mixGraphs.R 0fd2ff232f3ad12beb46e1db1c1a1935 *R/mutualInformation.R 3c65b4c4c38e072fa78d38b0f2220dcc *R/palettes.R 5fbf2d0e9609a4f7ab29573e7a829e0a *R/pathways.R bc4f90020d199ba941fa883bebb42586 *R/pie2.R 608ab043e398d83fccfdef7e8eeff590 *R/printplotsummary.R 1d8e061029308b1041c9847b00e835b6 *R/qgraph.R 7fe38c42e56f7e945aca4c4682a5bd2d *R/qgraph.rack.R 71f10d2e01e11f98ed00a3e9b448601c *R/qgraphAnnotate.R bf78444adf261577a728bf9fb42f822f *R/qgraphD3.R aca9e9050961ec59d2f47cf1261ae3e0 *R/qgraphGUI.R 26ca214ee630e5c5a4835850421f42e9 *R/qgraph_arrow.R bf606ba70672695ce6b881dc8dd2afac *R/qgraph_growth.R 9cce4ec996cc2e0c31c6e780e1f20d19 *R/qgraph_loadings.R 7791a7f1213293998765ff8b183364c1 *R/qgraph_mixed.R 60904156bd686f695f9b54b59f79631d *R/scale2.R 33e29821cac74ffe8606447a861b22ae *R/sign0.R d98fc7517d9237f79ece06f5521773d9 *R/smallworldIndex.R 5fa6928fd88120b52a68fa9bf2262e39 *R/toJSONarray.R 7f4faa3301627be158332aa19f2ce697 *R/vein.R 44bf64ee6ba941a8f9ce0844667d5cdf *README 546d181acb8094f53a46464ff2a70a05 *data/big5.RData b6895611f7d06c2388a7bd46e4b3a37a *data/big5groups.RData ef4c71bd8480dade982007848616b354 *inst/CITATION ff547ababc85108875b8e8f94239d4dd *inst/COPYING 1a2af62c3ce2e2cc524a8a2067947f54 *inst/COPYRIGHTS 8a8932d28066e7426908d41ec9d18d51 *man/CentAndClusfuns.Rd 02f455b0a7b7673f634578dd42cc7cd8 *man/EBICglasso.Rd 7f4fa47c5db35c31acc837488f0bff47 *man/FDRnetwork.Rd 83626c521e52cbe58aa26b68a006b68b *man/VARglm.Rd 2e017f54af1dbbf169a0572a8ba14ef1 *man/as.igraph.Rd 876ccd321d6044f82a1e42cdbcbc85e2 *man/averageLayout.Rd 41e79525c5fe290a421aeeb8b5233487 *man/big5.Rd fafeab77f6b53f6ba6d273f9e8a40abb *man/big5groups.Rd 75d380692da3ae7c8f25fab1ba6fd5d7 *man/centrality.Rd 
ced010f24546b22648672afce1205107 *man/centrality_auto.Rd bfcc810a310258c0117e97c10b3aec1a *man/clustcoef_auto.Rd 7ede8fbea21bebce34aaee4576fa6569 *man/cor_auto.Rd b3ecf0f70b44929f8b9b0eb89afc2450 *man/flow.Rd 6def56397700916221e6035da92af5a2 *man/getWmat.Rd 9f016f4b44ed023b65aae9e7a74e7600 *man/ggmFit.Rd 952605258fcebfc2c6e093bcc095ccb1 *man/ggmModSelect.Rd a8b7f70012dccf9780ea0d91503753d2 *man/makeBW.Rd 09b94aad451f23b137109135e7ae5e20 *man/mat2vec.Rd 91cff3dc38bb54aa34454d7b11550488 *man/mutualInformation.Rd 898c9d7169a1dcad1a5261a94a76ae8b *man/pathways.Rd ccb0bd71591eab120c5bf7bb11e6411b *man/plot.qgraph.Rd 60c990eafedbfdc74f81ebf5fc4b3e5c *man/print.qgraph.Rd 1fea35b7aa92237f1eabb5abcba5403a *man/qgraph.Rd f2f1b1469407ba1a4a5aef722a4012fb *man/qgraph.animate.Rd f33b6a98de08e722ac9ebf7805e153bb *man/qgraph.layout.fruchtermanreingold.Rd e4291b100392bfd7162bc0131d34e7ac *man/qgraphMixed.Rd a8be45e6c1563a881865aa6456b67a53 *man/qgraph_loadings.Rd 9797361ee9e1a05bbf2538571a8b531b *man/smallworldIndex.Rd df80afd1ef1a048c4abd18cdddbf1d9a *man/smallworldness.Rd 1b629055a103ed2847fe3842c21779d8 *man/summary.qgraph.Rd bb914b0d7cd201e051a5f647a08a75bd *man/wi2net.Rd 83f130a0e16efaeac463afabb3bb909d *src/Makevars 5d071be4e6977c1f9babf740cabe5e5f *src/Makevars.win 79177597c96bff38d59a3a03afbbf43f *src/RcppExports.cpp 2f417f0f8e7bc72f8488c93907f47624 *src/layout_rcpp.cpp qgraph/inst/0000755000176200001440000000000014430573263012520 5ustar liggesusersqgraph/inst/COPYRIGHTS0000644000176200001440000000041014430573263014131 0ustar liggesusersCOPYRIGHT STATUS ---------------- This code is Copyright (C) 2010,2011,2012,2013,2014 Sacha Epskamp All code is subject to the GNU General Public License, Version 2. See the file COPYING for the exact conditions under which you may redistribute it. 
qgraph/inst/CITATION0000644000176200001440000000152314430573263013656 0ustar liggesuserscitHeader("To cite qgraph in publications use:") bibentry(bibtype = "Article", title = "{qgraph}: Network Visualizations of Relationships in Psychometric Data", author = c("Sacha Epskamp", "Ang\\'elique O. J. Cramer", "Lourens J. Waldorp", "Verena D. Schmittmann", "Denny Borsboom"), journal = "Journal of Statistical Software", year = "2012", volume = "48", number = "4", pages = "1--18", textVersion = paste("Sacha Epskamp, Angelique O. J. Cramer, Lourens J. Waldorp, Verena D. Schmittmann, Denny Borsboom (2012).", "qgraph: Network Visualizations of Relationships in Psychometric Data.", "Journal of Statistical Software, 48(4), 1-18.", "URL http://www.jstatsoft.org/v48/i04/.") ) qgraph/inst/COPYING0000644000176200001440000004365514430573263013570 0ustar liggesusers GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. 
This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. 
b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. 
If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. 
The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. 
If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License.