MASS/0000755000176000001440000000000013577111143011050 5ustar ripleyusersMASS/NAMESPACE0000644000176000001440000001431613243747121012275 0ustar ripleyusersuseDynLib(MASS, .registration = TRUE) # functions export(addterm, area, as.fractions, bandwidth.nrd, bcv, boxcox, con2tr, contr.sdif, corresp, cov.trob, denumerate, dose.p, dropterm, enlist, eqscplot, fbeta, fitdistr, fractions, frequency.polygon, gamma.dispersion, gamma.shape, ginv, glm.convert, glm.nb, glmmPQL, hist.FD, hist.scott, huber, hubers, is.fractions, isoMDS, kde2d, lda, ldahist, lm.gls, lm.ridge, lmwork, loglm, loglm1, logtrans, mca, mvrnorm, nclass.freq, neg.bin, negative.binomial, negexp.SSival, Null, parcoord, polr, psi.bisquare, psi.hampel, psi.huber, qda, rational, renumerate, rlm, rms.curv, rnegbin, sammon, select, Shepard, stdres, stepAIC, studres, theta.md, theta.ml, theta.mm, truehist, ucv, width.SJ, write.matrix) # formerly in package lqs export(cov.mcd, cov.mve, cov.rob, lmsreg, lqs, lqs.formula, ltsreg) # formerly needed for Recall # export(denumerate.formula, renumerate.formula) # documented, registered but unexported methods # export(addterm.default, addterm.glm, addterm.lm, # anova.loglm, anova.negbin, # boxcox.default, boxcox.formula, boxcox.lm, # confint.glm, confint.nls, # dropterm.default, dropterm.glm, dropterm.lm, # gamma.shape.glm, # loglm1.data.frame, loglm1.default, loglm1.xtabs, # logtrans.default, logtrans.formula, logtrans.lm, # pairs.lda, pairs.profile, plot.lda, plot.mca, # plot.profile, plot.ridgelm, predict.lda, predict.mca, # predict.qda, print.abbrev, print.Anova, # profile.glm, select.ridgelm, # summary.loglm, summary.negbin, summary.rlm) importFrom(grDevices, dev.flush, dev.hold, nclass.FD, nclass.scott, palette) importFrom(graphics, abline, axis, box, frame, hist, lines, matplot, mtext, pairs, par, plot, points, rect, segments, symbols, text) #import(stats) # triggers bug in wmtsa importFrom(stats, add1, anova, biplot, coef, confint, drop1, extractAIC, family, 
fitted, logLik, model.frame, nobs, predict, profile, residuals, se.contrast, simulate, terms, update, vcov, ## added in 7.3-42 .checkMFClasses, .getXlevels, IQR, add.scope, aov, approx, as.formula, binomial, cmdscale, contr.helmert, dbeta, dcauchy, dchisq, delete.response, density, deviance, dexp, df, dgamma, dgeom, dist, dlnorm, dlogis, dnbinom, dnorm, dpois, drop.scope, dt, dweibull, factor.scope, formula, glm, glm.control, glm.fit, is.empty.model, lm, lm.fit, lm.influence, lm.wfit, loglin, lsfit, mad, mahalanobis, make.link, median, model.extract, model.matrix, model.offset, model.response, model.weights, na.pass, napredict, naprint, naresid, optim, optimize, pcauchy, pchisq, pf, plogis, pnorm, qchisq, qf, qnorm, qt, quantile, rcauchy, rexp, rgamma, rlogis, rnorm, rpois, runif, sd, spline, summary.glm, uniroot, update.formula, var) S3method("[", fractions) S3method("[<-", fractions) S3method(addterm, default) S3method(addterm, glm) S3method(addterm, lm) S3method(addterm, mlm) S3method(addterm, negbin) S3method(addterm, survreg) S3method(anova, glmmPQL) S3method(anova, loglm) S3method(anova, negbin) S3method(anova, polr) S3method(as.character, fractions) S3method(biplot, correspondence) S3method(boxcox, default) S3method(boxcox, formula) S3method(boxcox, lm) S3method(coef, fitdistr) S3method(coef, lda) S3method(coef, loglm) S3method(coef, ridgelm) S3method(confint, glm) S3method(confint, nls) S3method(confint, polr) S3method(confint, profile.glm) S3method(confint, profile.nls) S3method(confint, profile.polr) S3method(corresp, xtabs) S3method(corresp, data.frame) S3method(corresp, default) S3method(corresp, factor) S3method(corresp, formula) S3method(corresp, matrix) S3method(denumerate, formula) S3method(dropterm, default) S3method(dropterm, glm) S3method(dropterm, lm) S3method(dropterm, mlm) S3method(dropterm, negbin) S3method(dropterm, survreg) #S3method(extractAIC, gls) #S3method(extractAIC, lme) S3method(extractAIC, loglm) S3method(extractAIC, polr) 
S3method(family, negbin) S3method(fitted, loglm) S3method(gamma.shape, glm) S3method(lda, data.frame) S3method(lda, default) S3method(lda, formula) S3method(lda, matrix) S3method(loglm1, data.frame) S3method(loglm1, default) S3method(loglm1, xtabs) S3method(logLik, fitdistr) S3method(logLik, glmmPQL) S3method(logLik, negbin) S3method(logLik, polr) S3method(logtrans, default) S3method(logtrans, formula) S3method(logtrans, lm) S3method(lqs, default) S3method(lqs, formula) S3method(Math, fractions) S3method(model.frame, lda) S3method(model.frame, polr) S3method(model.frame, qda) S3method(nobs, loglm) S3method(nobs, polr) S3method(Ops, fractions) S3method(pairs, lda) S3method(pairs, profile) S3method(plot, correspondence) S3method(plot, lda) S3method(plot, mca) S3method(plot, profile) S3method(plot, ridgelm) S3method(predict, glmmPQL) S3method(predict, lda) S3method(predict, lqs) S3method(predict, mca) S3method(predict, polr) S3method(predict, qda) S3method(predict, rlm) S3method(print, abbrev) S3method(print, Anova) S3method(print, anova.loglm) S3method(print, correspondence) S3method(print, fitdistr) S3method(print, fractions) S3method(print, gamma.shape) S3method(print, glm.dose) S3method(print, lda) S3method(print, loglm) S3method(print, lqs) S3method(print, mca) S3method(print, polr) S3method(print, qda) S3method(print, ridgelm) S3method(print, rlm) S3method(print, rms.curv) S3method(print, summary.loglm) S3method(print, summary.negbin) S3method(print, summary.polr) S3method(print, summary.rlm) S3method(profile, glm) S3method(profile, polr) S3method(qda, data.frame) S3method(qda, default) S3method(qda, formula) S3method(qda, matrix) S3method(renumerate, formula) S3method(residuals, loglm) S3method(rlm, default) S3method(rlm, formula) S3method(se.contrast, rlm) S3method(select, ridgelm) S3method(simulate, negbin) S3method(simulate, polr) S3method(Summary, fractions) S3method(summary, loglm) S3method(summary, negbin) S3method(summary, polr) S3method(summary, rlm) 
S3method(t, fractions) #S3method(terms, gls) #S3method(terms, lme) S3method(update, loglm) S3method(vcov, fitdistr) S3method(vcov, negbin) S3method(vcov, polr) S3method(vcov, rlm) MASS/LICENCE.note0000644000176000001440000000347013265353414013010 0ustar ripleyusersSoftware and datasets to support 'Modern Applied Statistics with S', fourth edition, by W. N. Venables and B. D. Ripley. Springer, 2002. From the text (pp. 464): These datasets and software are provided in good faith, but none of the authors, publishers nor distributors warrant their accuracy nor can be held responsible for the consequences of their use. This file is intended to clarify ownership and copyright: where possible individual files also carry brief copyright notices. Copyrights ========== File MASS/R/profiles.R copyright (C) 1996 D. M. Bates and W. N. Venables. port to R by B. D. Ripley copyright (C) 1998 corrections copyright (C) 2000,3,6 B. D. Ripley Our understanding is that the dataset files MASS/data/*.rda are not copyright. All other files are copyright (C) 1994-2018 W. N. Venables and B. D. Ripley. Those parts which were distributed with the first edition are also copyright (C) 1994 Springer-Verlag New York Inc, with all rights assigned to W. N. Venables and B. D. Ripley. Licence ======= This is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 or 3 of the License (at your option). This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Files share/licenses/GPL-2 and share/licenses/GPL-3 in the R (source or binary) distribution are copies of versions 2 and 3 of the 'GNU General Public License'. 
These can also be viewed at https://www.r-project.org/Licenses/ Bill.Venables@gmail.com ripley@stats.ox.ac.uk MASS/data/0000755000176000001440000000000013361333751011763 5ustar ripleyusersMASS/data/Boston.rda0000644000176000001440000003523511754562034013731 0ustar ripleyusers ]X>Az UDA:c1Į `L5kMK"b5]5w6#j̽<׿owgw⪧DH)uֶҿ6mt:g vMMEq ?9_sW\>Mѧony}z i lж#Mԉv2lﲏͧ.}ggӗ5V\?Yf^8v>IB Onc?ӈ8yay`슱ėa?bW9T.k 2~~v٭i[ߧG.[Uz~yЯNͼq ~]^8zc7 8Xn{savg[+QQ{jr{+hS۫8t-O[>tq68?rNF#MtlעAWؖ6G weO>lR7g;JqlobNkwl_q%DNXUn{~~U/8 9kgߌ >֡UIl\֕ʆmۼnO%,6{z3vX3+~d%6a߷Ϝf4ȶG?9g1 f&.eM]"e?3[91cH~lKLx6[sUl`GceW?XޖnC&#ʺ1آ3jxz^fٹH6h}f`}lFμ;g$w.:v2[0 컆Jf)E>vy.胉u Ua7avlJG Tf?ШEM;zS^[A&t[~ˢAڳ#Au3&fWwKN[[1h[bYm}ɶ^9iX֮+=4gY-L8³Gu_򋙭90'0a;Wmg{tޞlStٽ7ۮe[g٨,ȔZf;IJVʂZ6}ε 2乃6}V o|%9lMYG3z *d0;KnX~:tTZFÛ.l.6?}*;{nMi]#^qǩٙcM^~_c|m?QFLce?~uO僅cF?jw~**mkc@KnUi5w7/plcn=ǮNx!޵b%?1ݻqM̬|Krawk-Wn8vKj>3vׇcw0nnj_dAk~m6`7Iܾ;gs>7`e-թĭx68tQ%ح 3j­\e`'y2[˚zkJliEaKԥ:[mS+ɖ.Jpfݐ%y֖-QdflUNekwMc+3ߚހ}+[Q٢o:"[|gӈw,WbÖ*ڍ,dЧ6NG0~<&Ebs\#Ywe4-_>FG6>K[ʰuO_wM?mdl'6vt/lczԥ_c{{]vw҂BEmR*q;}cniу?{&`dsnYS37dkڧ7f9;nms@kvN ©_U~B>9|cG%L;oգ(vHv}Og'kLZݎ)mCNt?ggo}rqvr=`VuzAve&z?n~g gf+w^'h [W%]8zv|fa `;G.Z|Z`] ؎D:mdفe+ .#?;2y!5edfH&qw<ovnH*1*`S{>gg$cى1?4 ˞xVkA_vfagWj~\&vblU[!Noe9}R>~%̒ln#_ǻ9;;ǴTӚB/.k7^99~U쮍oC~صp{O6|ցPʉ P'ؾ㮥鴂-1Ń^l)\iWm(2y?6ۓt-kъlf@BgYQuarq=65`0)iFآdTڨ۴St lR4ΦlqWlj߿[p-,vo֥l1r%c <*կGlk_dol]iq{5?pX^hOO{ &Jw)8"ZT慖 =;uzU?psk#ԑܭ$nZ.J<Әw~6)ޞVTn03[ =ؒeF7솭^}?)ʚ<(Zt<`eړx1i;yfO-}_<9~=/-YrܿFm$#=[R<%xى',CHr@DXc}['6Ŋ=Օʿ/V?~]?p>R 4^dI^ZI,0~/WpoԼ^<4w)Br>h~[ q+lv=etQ?]IEtcY@vI&ַ쎲EvW9ݝU9hbKX| ve ]@%,,a)E{+/00_rsʿWƨsXft ^m_6,߀}`q= URNP1#,*cvиGǠjE/Ma!Oxa9'f^hB -%]15c1{ ?hэЗZZ A-YBoӢqzxMTWH.QDgG r7=-ŷX{€Jym,vR

]̎e*U<.1)³e0,{? yy6_,<8`OK-[3xR<_q_MҞ*boxworZos⾙iWEA^WcR4'}?ߞh^YDQB.RR[68dkc^kP7D% ~c(+ڱ|w~L`7gDžmUrpoޘbU{мQc~3HyT/GO'l+3x*JyA"icV;O'톭^Elʧ'h1Sk]U'ҔW>g4eEЧA^m"Wŋ7nExN~ zFL>n-~S%iHǵxYi~ߍqx^yU9Z}jEU>P"t%yY\1I0 麞紦w'x>5^n|(Qȍ)sM  7a<`OnحaRA πVJ4_!|r øA\F~cP4S{(?79~2S +nxOB7 m3xmy_oG2ǛXS1~ J܈zg)a.(_ )z"W6}.'S\s#V(u!WZ*&zAn¼CQ޹AccyئJ _U9>MފϻqCAz6R&7z1 c|ca[*۪^!rCX*}&4Jr3Snnq)}2{uJʩH$r} )~`$7*lļ!3ـ+s >nu27z(u978W_H/N8>PT\Uޕ(\~Lr1DM@y1_58 ~<ʍ;w7޿d FWsSЏxLv;,E=D?1c^I޷z|~cHo3 f=,܈ayb^' ^?3✞}⺓qq ~`ghF> zx' :`yy($uA)]dutF9l=(`P^M8Ff įQa 0:"~ }/e؟vayEb۷]vbD0#?znf'DF+fO1 ǍDŽz~<)!x%AY\q/nF?xA~g&.⤗g5f9B7=̘G'B\6< 9y1ÿ Ds;/@qHYгr`g"~L&؃t)`= nG&lS.7wM4o }fܯW z1Cfe%DaHo$s3&; /+"^Q& 㙬Oi;&7yDu+zء}E5P}O3dFGucq|D~b401S>F|1!>)zf#f@|RtR=m Kz$ê%~zUYa@]i@V㞸/ ;py] Γ_Fاu~iFF;='Ƹ_#pخנxo'N }%8cO<Ʃ=%DAᧄ>ԄzWM%D$nm,>.w)?N\w M$/+7})S fz^[?~DW|I"y%~g ~ /П%C)KW# >`?$oOSI&ΣN^=R@WJ gS~i||P]Bu_CG _ vZG=xźHv^|!=RDO W' q&o?۔g@c6!9/h_Io/bn=RIZ؎}ÞC`<.nrhn_l]Հ*!CߍAg\s! {<@( QN5Jov[L|/[Jxn[\󭷚L6q6k;?Ǎkop';*߰} [ڄ̝'6;>fry5s/~pVj9i=pgW;~7WC|gC9uue{_^֦ i_Y4C|F^6^~8=s-b=|!z+<EܡЭEc79xd\vGݣ˗$+~}ۉr%N$rn/ɻVo`U~LM5gИ}JzgZ,2E|wDxDXX֎gwQ~%~F+l+t#pAMC=ߑ6_Xb|_Y{׉2/pYY۝$ÿ TsS^^=,g(뮩XP'a!e0mfx|и*RuT s}˞UKȟЧxNI=^V7!e1/Ic7uX\Zux zVB/6 c~ .<7*YMcnΝG,+$Nqd]CgJwG}A˼ˋI_R9/!ТZ2ݏT4`] B 4iGΘOD߱˯Ł;Cq!?-4ąn'eQs9~OKվ}p;?\O* =E?'U4`HY*˅Rd+w*eN\n3$QУXNB$H|7nU,qTW1ECmv:U67#[9NWؓ^sCCཫ;"[Dn"yܩWnb~# ⤔[RNDr,`(ёl/Rgv"WvWf#<%{yVX1,or]n#{49cRKC㡿bG?o,(d<{eOQ+SFvSʃݫy.ӹO߃j'kbq`9X!ޛ{,=D9EK,G)K*Junu9{2LaOEr@TNGs]GnN[IRZr#n=Hb%/W\Tf3˩?'JyrŲ4rNܖ8 亁J;\Kt!{|AMSAa>Xe u˸5⚵0@num 6)mgH=nWAcdWMzm1? 
E<_^߹=g~̛/[AƋc;n~}Ք:ty 뭻+SVOU=l-s$Ik_}++'!/N^`;sv~1#(9'mҝ58`B;j@ʬ*ʗ1ܦT7m"(̞?ָ/`i<{:g/^g7iDZy?m_ă502Y.k߿~8{v{M ^]ξo41~X`'zG dIevy=58b~5>%5;9= a~gW/a+|>͍=rc']noBbO-z5Lf1uInO`;fvz9e ۣ]I]|eaz%?Tg7~|ԎT|\`onLƞDg?i7rc%M΍ŷqPafݝ_ ut]OO{WR4,yЖ0I1bo =<<_[jrcbr`7 ҼxKg"F7yHϼ*FAn7`+_WϥPSZř7a$g WoKOieXt[)7-ǵ>’DUͳ!,C|&]=偑biȗUk0ZQ= >S]Y U[|È8Aŗnu}>T&ە@|G?]\#T@1;s=]x{1.mgdWv= h3 ~2ߗ\ϡ;O|*s]ݢ႞ Čbgz";q; :.w.:x1?5s-'>b>{lυ<ļg<|jxS{Y'tp`b>{t tl2Ffvߣ\3|o;xfi4r] 7sNC/OD%̭Oo&X#;[!e^2B?C io-PT7kļƫ'E=2Wq$}7#P#c%; NO|"/q?:a{ƛ:Ⱦiίl3Ey`[Kz##Gĉ a=iB_ݚ 9$')|v^DU!G[MȥOЗ.*cw41^{nji +)ijL{0Ɲ"e\NM1~;8g.S{}NdoCGqXC~~}h?T_ޠk+l[V͘(=c+j6?)?N|2?h|_| gtɨoK S;-ߌQ_ngAEq) $ag*A G?q~OSWA'&@>4vGIv|C| 7kȷpX2l2!uCMqxcK9l|C/۠ ͹졉_4u{ U*򟆸KMA,K a4u ̈;!A{=BO*XW/HfG=үw8/,K7xjygēL!?^,{уӗXyo_^3Žhğ9/+ƾFO74uI軰sFB~ ;Se8.qC3aI. CD\kﳇ =.'M*ڥ>{N*x3GdSP_4MN|$8MP4MTX/S549zk|&SQm76b]z$ c|{x_חmrgKNď)~Ee]-`+`T'׃W'czA~Xx|=vU P%?q?,KV#~?x])c|{hBt~gC ط?}ba(~? CC_ >׾DᅴOA~hW z1?y^D' f[E;=C~пo[!G_ؗ[! ㎴N"hW/aa83= ]aQ b@4$ rH/ۡ'XF8F~Q {z RX%DcJ(Vt??T+!/VBWB.`W|"za?a"o{uޞx/89AzwCWI31Q\ i)Oؙ; Ag@~9Ruuw+ÿ\w{w~ӌ@x/?CEaE"H>?^CuB3OCV~;=@?E g|;кHa7b])\A ]0_?݀k|:Gwyf؏?/71΃p> [%ᇔ0^Hh=" D=;c\/^ȿc<wQHvNyr 6}7Hi OWCuu'?etŸ/[d7 7<'-b?;QSz&!{Qu&|o_Qh?8ȋ/xR%?u dﮚz]T<E 8O 8%P'9aG^\Dq_> y=EƉ?1Ge܏:"f9{0?#@W9[ C}yW U/W~w$$EyE>8cˀ_ʧQ*x :r/e]GߩFƴ&(q@OT'^c4`, {Be2' 򍀾pl?"D?e p_# %:!KoC@G  וjqJ{q{Q{{1ge\?(y%K]1ѰJh")tHq% ]Ǹ4:q"v:_(^s|S~AOUȧ"c;.˽"gWǣ}7P;OGhhICFiOG׿4ߌ+ל_% $` #h=2O 'Q*hG0Gߛ'y|w'XPM4yh4|kYO?z/C@?h?ɷl|PR8djhOwTxα_'W+7u -2$7DΧ3G B/E?t-}8SCv^HC / T$gD7ѡ  h_]_h4=+#yмgodd.~QO[=2QJhOq`%M UVG%ȭ2R?w^ =BQr}1_0>mqvYxYih>Hddi?2ΪSZM!a1q6~(3>'UE;5:X3>y <aAvAkj캪.|hݶCE3ųʠH跜*H_ô^gl:)kiXr&#%yU$z8Ȏ*R~@ɮU;ؓ[BꮊDΧi~Y;WhO]}_Σz&Z_'+SO)p }^k/i<{xhTo>_ONu'AS6!|:G}o)6g)h#!TRޥ|} ~O]T_t_Hp\4~0}%KrS/`D PS$o`1K7ztAqTQ8O+I_oTQT{JF}N~h\"|:OQ¶>8R=\hQ͸Ϣю78D!/ΣtJtS8(PDRl~ZtҢs'Ni۪>Һgڤ} MԺgӢ]oԦ=Z컥hAWe:FԴi4j6ӵ"8B[(նĜCi'd!GOUSMASS/data/immer.rda0000644000176000001440000000070311754562034013566 0ustar ripleyusers SNA޻+!Dj;~$@|;++lM| {tyN273sn&&#kH5ˏ&D2.e7tFB#岔)XGBX a#X3RKj۬;E6ɳǜT*ۓv 
xcS0M+bbR`C+j<6"$]'^e-GcWym-mHu7wo>_BJpyޠ_{ΜI3կޚR*{$+__e/rG>QکyKX)3.;㐉q?ay=2aRo[XYCϏYOĝ5o2o%~7}E+=]ęȹe.A|/x2qRǹ5g]s@||3AaJ KQyc},ve/c|OS*^8هSyT'.?KX?a܂*?2gp>ԿCXW\~ȕ;>gM}6,'o;1Z/GCu.۰\>]ޏZCN۠Z~lqa_\qba"ro}3ۀvV?s^ !$q[Ma $q;mx/wxOF'?צ卻Y8 ~_k3h4Wrxr?MOj76ͫC5+GƝ{ x9~YNdnI}\xI^=?g7ubZ砳\ZX]X< qܩҢMA9;WZ-?6n.ތ'5KMASS/data/npr1.rda0000644000176000001440000000233311754562034013336 0ustar ripleyusersKh\eǿyv6ibk2d)D:!A(HJWEqSq.D.\.m\u%h͜sN ry;wws;Ƙ) W0f©Mט=9F!-ZW 򐒬;7'%e;9g.O;g"fIFC~3K$M]I{S/՛W1E_Oݱj,-w8˶3L=s~cܲ_%}sTrݸx^',(~E5lgb_UqWRC>rs+&2XoSo8iTq][VGK&=?{i_XA/'Tr&HS|_ bxUqcjulQ.95vW9jZ11oGƔ>,xcncT>|? cjf,8$"b:N_0B tW1oӯ!/:b|sRa?^C#3Ag*/] 0[{0Gt]"75Uq'kcXтg8[>S֘/1rӁxvKoK׈|6^^Cf@ A|߅/d7CmӷF匒<:bɟ1%lE׹"wZ'EI )+ ̈i%}G̒}9A7EybkmeZ;y ၗSeIyI6No,O=vyz텵lro~,N!$ nwWE~ !nX} $y H@ bygcLnsJ,J/jFӋWEgp^by3ω}HJ, ¾JNARlyqrskQ H ], ~N(YqeK8kkAp~~bgX )]ChKhr , %E؊8ߢ(Jǿ 1MASS/data/phones.rda0000644000176000001440000000040111754562034013744 0ustar ripleyusers r0b```b`dab H020pib CHK8x2g|o(}|_(|(4LBP |D 0A iqL4Hw50eէ k@׆Һ`c:HBԙBBU'?iP~PJ2[yb`GX25 91'TMASS/data/shrimp.rda0000644000176000001440000000020111754562034013750 0ustar ripleyusers r0b```b`dab H020pi⌢(' 9f98408,  | x*n Ciq;:Q@QC1N1MASS/data/rotifer.rda0000644000176000001440000000067711754562034014141 0ustar ripleyusers r0b```b`b@& `d`E%iE @.+KW~n!u܎ a,KzNy[Ss,Aݤg쥏82B 8flLG&]qv] N r's o(T^ *Uk99 0D^$s8 }8-eXb@l bP3l'~7@:\^:9 2KT8ObPĒ@@ g!+ 75BŖ Jxk b] N>(Ʉ5/17&pII+,rY r`l6 $&2'$g*JI,IK+Z(\$@ XMASS/data/newcomb.rda0000644000176000001440000000030011754562034014100 0ustar ripleyusers]0btYQ=zx20MeKLLB<[}{=}%-yS{ ¤<<,+ /8ɏ3̛bAK;_,|V'/䕀&.h۴@_&MASS/data/quine.rda0000644000176000001440000000101111754562034013567 0ustar ripleyusers KAƏfe"A/RdARH\踙 (ܳg|3ͰΩCDT-D_Gd _{ZLCuh 2ރsC:8,uzY pͰk;n[{-DgДΨfޏSKW3:RA'MRkhm͆NU;Zߜ*:SOvQ\TNLWv3i<bFc'ge&΄Q02sc uHq涡5QzD*#;x9\w? ]-cQ05mE;~S7r^h2Hļ-s1h},g^Sm=y'eֽ'dJ'ޓӹ,ι[p9.+>uiz)S|.l MASS/data/biopsy.rda0000644000176000001440000001472511754562034013773 0ustar ripleyusers ͝M3q5Ha`|n}5:0 l#An^nN*_4^^W*"뽊=oonx8ڿOhz9{o? 
{icm.R=GۡGcmơ`8-9 ̽>$XLb4M-al-v&0N=i9b8'ƾI!.8`c-GcK )4 4BhGiHgNqK[>Wbۋ]RTP )@Y2 Xnҙ>t !>ХC[f k<}N2z-oň"4=by2ޒS|ΆiZ@݃y”ərb yLGvwSaZZg5.9 J.ܓ3CU ʉep)LݐbMVJ0i-8䝙`5﬷7CՆAhdz PygVv|Ve3pҥPL(f7`s+Ȋ-4#X~1e4eWi|'Ni ~0YPz6 IP4lǹ m(epYXpaI۲b.uC[ʶ8Hjk(~,2хȷٰi24EquVP! VbTn.ڍQc4Y@C" 7B+Ȝh1!8nEkfq NK+Vup(;}XLe<Slc ŋÐJ ɳWzT 6bSMVi4ifQqO0LU;jSQT(@s,cO15M@5Enېn<(RfkS(J4~r4EvDH-Er>MպMG`j0w"eChz7r= 24]q7ZyDwolU5YwE{ ZXarx<;NhܖF:fm6\xÖ}h#9ĉe*.DwoI3UHƚFHs(jd6FḓoUr  L04]y=hrZbYzdt5CC w2}ʩe;$,41'Cvri,¥j!myG%wcu&pH;l[Gvml-Pt7 [^[I#=BjlvFt!QMQ,d LM} N<:lYShYqKe00#˴GJX>}c (k2_h=IWđYlP}H͞xEl WjȏRk1^k>wASz+TH(5 X"̏%fˋȷEr"uE"F| i۠ !t&-M蹬2/@0r,͗Sa\-e=vmm"fsmizGHB*(\Lbn 1r2maZ/1̖ 2h2̥] '91ƹB0u ? ̸alX[Ymi17d+"0YaҠy:ȹQsf,fS>Ô~H~y}|7/:sxp?}l~n8>`E~a˝~:}o|΋svs=_0y(3>U\ Gr+|8ݿwbm?mt(}zg*x^@{s|<xopC*4)ב `rWˡwYg+}`K/orp}/!qߔ&ꄟ<knXKB>ev|c̹Tp M{h#OE`z~=eMrۥU) \6I]~19㡔ːx5R?|?!# 髌D+tˊ~E}y{gPV{=˸_Fx΂',IuTJ~^yIK hBq=QpT'2trrc%_/|N9m] m\}՞\7* ~ ~T=K?J_5xڣ>u'#kr/93|EM}Wk:{u{>l6G}-=ct X;o-֮ͭ1sO[;#v򙾋qv: 7ל8KkR3Gޟ{$-eq}6= dzNP.ۖrޚߡ\y/]>j9LjN`-~=u:EI:»Nb=}Xڻ&OICu]*Grޚ>M豷l-eO9Aw/~-)>:}h s'r㡌Ϟ.^-KO'䶠yBj]g ٗ贆aj }rfO<\URoPMkŒ>k>G^s:zp^ tK_˃F1r_Q老K,Tj#cϣg{mje_=>?_~ߣ[kyM59OCKZ+aS^^#g5;훾iKZ|[^#9E`A9q,/wM~W吞~?a|_{\I0擖Y*}l}7w>$?^+H+%*~ =tv(|WBO{Y:C},gYCY8]:.j9 mYkg$sI;iOam=e0{o5_@;wg}Xzjs^ܖd@.[u˿9Ydϥ^o=jo_e̚=+L[g;Ļ;h=R_}ϧU?/z(^y[v߿^kMȘJ{7T9/"O͞Mp}˹~Y!=ܺ{#;gt.3XëyOPX읲^K;.Pl=Wa8~e;貟pǗCɋ};ϹXv ڸXC2eO2r}Vgt޽07"ojWP uSϣ~M`8ZC~| J09q-}=Zm\+|iO^5[}Pڈ\/ʒq@GUzz1S5}$ /yЫ,#60ߍVrof V]RϡT.KvMVy}wU웭kwqUeyW\媽+kJj^-ڝKӗCtᆲɁ9sp ݷ֨uSr&n?ZoپƁG?K>bG0=oV uQ>2{zVgfYusI5|﹇/[ktYp[x4vЇZWuYyr-{,šg FԳ1/W}nڳd_BwY#>Uh潮|a>]j {dO~j'lJ_ckmң|Kw3{伴n?j6AW7|š|m.ϋS]Cb_[R/N|'ĭ=sk/ŇZvgI_yY=g:k9?kpFeto_y]{g >Z-KZL\_³TƷ}niN_Ywߒ#==cW[gY^,Z[gך^߲-YӉs/[%Y{&-ݒ˳}eΒ6=;*/}U_˟Kxsb~FOյ{]z og|ÿxY|k_|/?ɾM_ܧ_Ƿ-Σp(M!Ҡp$ R)Iqںq4uuOڴ(E ?ܠHqۤhܞ3;w={ι{˻^}/HqF6cs#ip,U;NJC@q%QF@-)@6!hdW6 h kk-`FMu`jC^l`dϔeTA-\R8S<$MÓEf KbxRXzr͛uZ)yt՛Y #6ac`49;8ApZ8;A/w`Q`EK(^F5%N 5x'`8nw9jp&1GM' 8j Kp`'yG 8j"&x`Md Т{%dua JM_ /}ѫFzN]u?\5Y\"xTv ko^Ѓ8:1n8vGгOp'|m(ׅ=aA,Hc R!ؤ蛥\uߍ}G|C/z!v!{>(g O|[]C^3uNү.b 
-.)|oO]ɛISX!Ї?~J8'ZڥKyR?'~|wiݠK[yNȉKK;{ENʁ+h2aGq*g肾{6%;d˸ H}@2wK]e?я8 {EߵGaaW}Ơo"~kWћI?)ﻬq/냴k/s(eƃk2Ho]e\J9z\?~ȝIQNE;W (O/VT,qq/ 8A!Ҟ2O!=\Ҏ.2/[+ծ'O֭H'vԩogOO28; GL[||R#Ǘc<O8%rc[A<24 K6o1d7mtf3bhEz=tly9f'>v{6gOvZiQfصo] }(oa62-Vl51ȼF{ȑ5FYrTi׉}d;5>5N^_Wz 8g^C4Vi^ ϖi\L[ɒ5mYeM55~vP/铉VprI֚χeWku]u']z+]w=lUwu4py(,gU>eg uhrb-~=z|L9]wiKRv^VyuqK\4 ȃF6uAp;=i9_eIcG#oU7~OkBZb'e}-9[@G,{9+QSo)ލ(Gczg>oo]#"m]z}m}OMhgz`@mzIeGMV>i)_V쇼#+?!ໍ ^&~>e q$)G_~>:5-Kߢ9aWuʺ :e:n>q K|G [C?@>F ^/CQ篁:>,ld(Ǒ  ek^~f=1jmNnKɧݰ KtLgy &aKiVe}\3XQnbJXWa;2MzC)z^b7f=Lc63Uu``cq۷Rfim@/1M}'d~x#PK[ z[LqdXg<;C~GH߈OLo.-.ȦNHxˑo(G>#?~Gٜ4az1=[H?;Lz^Q4ҙ-^ ~-BYjΙ+D7'(G~jC$GoE*=.?#w?F4Z>K8}[m Xw;m[.cɔmGmGi9vLڎі¡ס 9󶋖ږ Hmy n C!/Am ~Q[їE6Ҳ|oy3ey\W!;hWmtq^@^Gy}d=o+mmgy  0ˆS̃N[ Cz:=Y1<zy3?}ygFs)O"{ r7gȆ5!hWQϏîy |tşs3^?< 0!LcA hg@gSex4?dGc0f;\rF]F }>'Sl+Bt yFƌ}9g_Kx ğ>tאގ=HFyAWaN { RAv](g'nؼ#ڃ2Q݆joy?|p ~$|ǁOXV Z >\IXR?G#CZZ|;} X_ZzrDN%'>i)R(t{Iy"/{H)Og#i>Үbejl#OL/:I.t⹆tYzpQiuzZE'b93^qXbZos8!-fЙl}ԁt'ʖ6v2 ^kh܇8@kTr=HE>K4io}g[T,?J|hth'%di<ɭD,Coٿ'L:id?mZ0C 4wNc1uW*c\+ Kt mV6}Dy:bP;|,TٹvU³=r\WqteU C[hk:iO_I܍m㠺@M.1Skɻ{{m}M?- !a A|<ޫ{ pC>ʐ=Mx{ >?0S>>iWi>ٞyDBxᕒWMy8=$7y)fn}8UyhC26/,Z2~02,\ *" d K^'-AZZZGV O^vHB-jM)w1!n2('#>u|kmuDVI7LF".̒J0*DZntJ\kEtuJk'uhF2P@uf91%ò,ɐ|-Mʀ&-*3ºVޚnYkHH/EK7j43pI)KD(!3T'{]3F0#]7cتhf^AaUu!nM6ڄf79 ̙/p3\up"}Cv;fuZz5r8fҩԢ(WtXN8FK% ,Yu !S˳a^HjNB1S@RMASS/data/accdeaths.rda0000644000176000001440000000054111754562034014374 0ustar ripleyusersm/Q_{H U"NUHH%wLE`#z5a06l$>K}^xa4W)*e2yfbq˞Rm04 Ө6ԣ]y/zDǟV=nzR acIxgF1& a}]jMB拎їE9}|þ68#z(|:D?r>wm11ѽ} <}AS[orN#\wv {,3ղJK,y~%S\u=sN)I1'+Y[MASS/data/cabbages.rda0000644000176000001440000000071611754562034014210 0ustar ripleyusers TJQ>bZjVJ AQ,A5]( k |;3g曙3gRB°ijMwd-ݑrLcc>%wo>MEdvNƉvrĔb=C3bNu՘/L vkZZQ>d¯5JWȽZNQ'g F u#!bOEޚo,W?`կ cMWvWB%ғސkm7`/F@ݾ7::w|><1N?-#M4^o'G35oA{.+ŧ1Ӑi4|d⡩xf*'⁩x^*2gx /`݀uf9jW#拤j1WcV/ffI)WSKj3g|e#Ew:jq`iy~Z^?Kr#& /u MASS/data/beav1.rda0000644000176000001440000000141511754562034013454 0ustar ripleyusers _hasvsĮ?2a ͟m4 +irq!!@r3I$Enw.}}өX^YLc&giN1 , Һ 
~z1zMgZ~D>?!yNRݲy?}k'ߑ^.H$}$>KI~gs]u>*zIS]Ug#SGU'?\Ѿ#2N"0 Ij Lc5ALs2ӿՙ]ag A:e&Emyyo,3oŕ MASS/data/birthwt.rda0000644000176000001440000000357411754562034014151 0ustar ripleyusersYlUU]tnTqzD PZl EǘQC1``8$>}"&묵9J'z9{_{Zk՞qŸ5W^ohdJMxDx W쿃},͠4} hAk3Oh@' s;m 6\{LeYՊ}Xc js_q>7x`Fn;:,`pmy|;}Q7s̹e=''{w)Tz7qSO~X`zs^l>J#jDB'(}g3^{߻+.)Yj(V5R=g1RװP쓰n\KXY֝yc!7fyQWԼ{O>>QQGuazpu+:'#9,2U__W=\  ^Qp||ИTfp)YV:le^rXۤ/3\ -Np92[,v3{i=ϫ\gAЬA5O/|4z0Ǥejax ?y^ꚙK lOS_Zp#y '<~l-⃞WnGH)f?p;Ojbx*+ʹ t**[2x|~gKU H0W,O}G2߾]k1q?}eA[>Uu-^\-m֎sVi]_P3>(.;6&]A,1nWkk~tW η?RH)+A)RRJ))r^|\X@ --&bZ-fӢ%" HEB-jPZ$而0ja¨Q F-jT(nIuK[RݒT-n)uK[JRR-niuK[ZuӪVݴ*dT! 7fT7b 2Af"YͪnVuoV-jUݜ4Z"9ȩEN-r:-nyu˫[^-jW =F#mH=F#mH=F#mꦧ6zn6zn6zQ7=Fуn$mHF@5I$I&I$AFIX$QE|HrI!<$$CtHrI!-$$ddBK(*dAiQ,B9(-*BH2I!$2d A/(+* HrI*@HI ? x'}dD&MASS/data/road.rda0000644000176000001440000000130511754562034013401 0ustar ripleyusers]T[kA>I6mQ!/%xCnLj`h[Mt[vV} "蛠 (xycA쏨~9fwsV K燖(N1-# h^Qb|a%C4| aS|#>y{SRҾ@7`|K` n4p ?Sȑ#'^,78zzuzZ1e>9+%ooJ^.y2Kw^Ha}Ifg$ iz]{!κ}Q˾^b|z_УtyNsFzE[|N4_EuusT0ԝfG,~f{}0t&H+ޫ}c]Ds̼lwh/ݿˬ g$y\}IG4M|~}2Lj 4=%Sa:^6 e67]ov5[X ٺ>=w+1QDS uz%ׅ#Tɼ1nvԥix!2ݴ]2Ӧ=L PEMPiVp+mtZ*Qm[aea9fOx÷.˧ٽ{'^oZvb;oMASS/data/Cushings.rda0000644000176000001440000000071611754562034014244 0ustar ripleyusersRJ@4Qb`TPQATW".j"iC׮ ~@"ݸN;gp`rνs=3(̥ iX!VB~ r)rP+uؔ]}c<8IvN0×qE GyN/)ԍpgO/ 'BxB}qCٷ}}y;׀w~w b>cz;}!;9>1D^cq>u-gl1ϛj/{^Y(+FMX6Q@U{jJV&{vjN@aT |K/ JAIkSͯuzWyJu* N IYbs-[$KZ.i]ҺuVN 2d"D )"RD~KXSkMASS/data/whiteside.rda0000644000176000001440000000070211754562034014441 0ustar ripleyusers TN0v4Q` [BBk:Rڢ$Е׀ Hl R"ը+XJ.wg݃B3a)>-C2ma Ѡ3k $c3׈\oy2 {qȁ~C1 2{x,l&?γ®9ܥd6O*2pYoB6pjU~pvq|Zrޔ u ĥz:V8U5m Q?ZoSio#Ξ´:#K|}ATgTw ~._>y~[O-ti\X[ˋxʷu~~yyz:]tw 9zLQtX|xőT{?;6O:[;c0{MASS/data/Traffic.rda0000644000176000001440000000120711754562034014033 0ustar ripleyusers [s@%Z(%$ Lz>ժxPap_௫;f&n_)oq0MBJ1N^V(eHL/cäs J?Q0 Md MA%R? 
,0 sprPs0 K + EXa6a .eW\pnmwa}xavacxO<^kxP >×z{s>( rdkTzDկFKu3HqdT[}D* hͫ>J'CZ+H2dsXe˼ )XR.A{e b"s؟θ߂k#pl%x,,7l[{GU-œ@S|CbQ?yt{2=?^߾X1ɍ3tOG MASS/data/motors.rda0000644000176000001440000000034411754562034014001 0ustar ripleyusers r0b```b`b@& `d`l%E @H5xxxN0+@m%@|QfPw&cLF<:bPRk^bn*0Q0<J A8;37NN+F7 9'f#T!WJbI^ZQ"H+r΢r=cj p@rMASS/data/mammals.rda0000644000176000001440000000223211754562034014103 0ustar ripleyusers mU]hUdn[LQؒ !tf7[u5 3swgɝXЂJD_CPiEHAbBASbs' 3{S4;UCo >;;E)7T]M3E酿}}*. zCK/?8.=~=zQH&(G?ծ>\ū_hY=Yn6(ZLs\;'9<-3k/OzVB_ܧ?Zsi ڭڋIȧD&WFyqCHQZu A\Iȯ3_8?0]5y s_K}U ]$vOAi!7D/K>{oX/4 Xj !oG~D9upߏu܍}q~=[ZxX؟\gPֵrThFHշ}1_ 'd%jQ4QMq]"T8 ppN]T#j1GjM-'&hR6Qs89Ug#‹> A1z/ķ@OG,$9Njp"M T0lx-J. lme) :A5ڊ~C dxB (Nm&͐[ Y!.K +'MA9Dr2sAT[$ ĆG|dK}D@VVUm6pgN8pwRč0\ d0 i '\UuPM/Ua88=kSȣ tzWU&c[4}Z 6dS47I !Qxf&e򳓍pa˨f=wWdqqYf95`0R~}7Ak*4l}LQʬ;T^.7/5( MASS/data/Rabbit.rda0000644000176000001440000000075211754562034013664 0ustar ripleyusers j@+ٱ &͡Z&Ҥ.Rm_s8rc%Lߡ -HYȂ ¯oVٝ [fqM4ir0ְ$k8?/*!up7s ~%uxl@S[>xJS^6xXt_'R(|-XsA < SRRPEՒX UȕXV4 M9gQ~^ c p1MASS/data/cpus.rda0000644000176000001440000000666411754562034013443 0ustar ripleyusers it$UǫL&L&,*$魺t{I'; %%΄MA⨀⾀  /9?xF lSlgMp.8v` !)ȁx<~O'Si 1x <~ 6:fA>Ajo<[mLLz(Z86ngɡ|1,Y\"Ւɀj7 dXpܬYv̲ߌ]ȤZVPsdj;dR}v#3XI Bڔ;k6☍8IΙs*KsR?YVꍊN'3Ӝد'Q ٤TOPO-k#9;R2Zd(k6eJb* )KviHnhCLOE$)'"!J݄Zehg1ī,irU_l۰SI+ )$[E)j>9[ BJ*;g&1+ڕxu ]8&%){Q)Ȕ\d2ׇSl?kwMk䄽!7ҁICRq*\&\Lݞ+llx.MN*SӴy{kXWiJ&8!%R򾲒PUnacW &RHG)HkldHqqȔBTi͉;K1Q+ٰ )By\-JuUGj2RfX7՚MeM#!JoSJ;n_s܁L 2Ҟڳ=gW}f-Mf2WPw3[dِRaɐ+2V J.5 )N.Je7IŐ26Mc:wHThώiWZqtbE*CSf9mr,KL7vݨ\4Xk vOvA ˏ"Z>'`^8RNHʥR`.T0-orMfn2d ^^VT k婒VITI!\I!3Q۳qѲ1h9E[j~gHμk rWZ/Z?t""Y+Owٮ[PgEeȚJ}Oub2^_K*W;U*xHjQ1yroZU&>*fb fM9@D*f9mF\e^@eS'մQլI-$o7%C.',’|yc cMڵreܖz ^/leVBûeo}1ٚq KGh uޤM&۪AۘG1ڣm#º9slLstg dXyyM[#Gsyx 0vQЫDu0Cpۣ݊É(0{sm X0%$__Qƣ%fۣb{R=<;ƜV6m}'J[ⴝ;}nl+K1jd- 5KXlcՠ+C3K4Ѷٹ@.+Ċ*sǻR[ق7{WYD۷yt]:?<[T]^-Դ̿&fu?But">ծ !Oj>)DiRWBCGAn[g},.z򂔗K~?>&_U]ae.m\ sGO _&!ndJ6(!s<.5˞F ;zLdlRYkbږ9E5wyWt%]iLP ɎS^l8+Z~wi;O+jlR?Lyt;8R۫ƚ*_>Sc+~Hչn_Ǝԡ>:*˖Ƌ|:,M 
714:OJSЧ\gGo&A>zߡ?ŬLͪ޸,>|#61IGJ;Xx9v5?R}ja^^0ږ9&]oEr+<.Tԫ$QǢm_d^1v.nGQ:"=y7P:w$X\v;zhPꜗOT K){2:2XRO.w2LeFiؑ/^xiku8Jԏ)ZȘ貂y϶}Uj=2^Wc?]/J}~<>v3w#h hF2sF{4kWNct+ǚwEn 5#|xm5KR;ih缔S\?[)~81.#H6oG;tam|[` Ot,iN:J{pP+Եg;gq=o\H=1uОmMYQ>%Ns}Vb-{vdl%}\7׮Q:?{/U^biO'I~kncfq9F4x+}G:JtLnoꎡwbܿ N?*>x,qd.ё(\Bb7zm}4V i_g lfsJv+Xg[gr/h^yVV0ζp{d#_}Os~Ө*lGH+?ƐFGD|1暿-~X#> &s~a۹ Ӿ/:z˖77_sim6;43c1!Ǔ06qcːbݑm ]w.:}<7ĝĹ=a<#BYC+X[x >[%<ۈ˼]y'3S6缓'zx㣛l ɞ?o|&)LFl fKS"RZ=oT:EHMASS/data/Pima.te.rda0000644000176000001440000001054411754562034013756 0ustar ripleyusers tUՙ!$ Gg S@dQG >JhtLTXk.S[ KH+ bq;gܵ>9{8jYAiezO Ύm'9栥5Aкʹr]h6Z(CAD͵:{*4|[[yvmb=\W .C->nizY0cf(_ huze3=ݷq ^e&*84%1|Om=(Fe͍P-2cF8^0Fi2=_imMc6+-qidl1=Hxm.x.8+0Mk֥T^Y}&>. R+~:9AbyWAG${KL3R6Vnrj+묬VL+LmXae׬̳o\E[ʝVn_Y+|EVrX+Ylf+c2DVE컭ʋV|2<~f)+?9+Ob+sL*xW|0Zp=p #V[y,7 z?/oQ_x>8w9a\ԍ؈^Hgr}>՗ɁwկvWOtjC|tNXX:Q.uFsyw6אe^|_ Bu`7Zl{'D #Lʥ Hf ջG걁,RZ< ^茎˭mm'ϿCw/j\={vNg0i @_6.W{\]{/>}/VH`e, R=׍{Tm{IW䀭/vE]ap]DblL<>?MEI^#K>1+ CObFῘĒHx镳>h=8R!s+VE%+_rpTG &FWp__n,x8ې/~<ˉ_WEG8Z8ȧ* ZX_W$,W'az(W}'#X(Rr gx܏nwkٕB_?YWnOz܏$~*QN~ JbC+|U}5J-DEoSBrQ=1I|}L}GF70gE>T\s+S?o n QQ<^[>{x+>>/J$qU1C]oXu$x>V7LRwGVSqV>T_{%c,_:ߵt~7+[[We4z~}(U7|*_#{3:Cέ 5GxR9 αĩ$Rsѹ}sjz|󥂾gcy_$ZO=oGTR?pS~%s@y2OQ]*_+_]k}##buğ(IzީOO:ߵ_S~nt< n)`/3Ec;Ѭ#&m>}8aYo5YV=iynsHkom>1Ϛ}74[x¼?pngrXy1-zngV՝2U1M7wiW9E}͇nkƿp]/ŏYf;ysmE9.~`rl}>tl:p7&0G°}ͦiY/~7/5vՆ4{f俔ym?9\6\v~TwSԊZgw-oH]afw9?1;mv9^msF/M+ǗMf~?¼nf9!x/o1x{i"xl bhŜa3'\Un[m˘wnuèMEf_aZz9 }_CixD_Cyzg\#mN;z&`?p0_2N7g)llÄ̩f .*>j}yQN]ca_o}ZOwma-|Ws=2]nN14;l0p渫L'^GnGᶘk9_}bvz-yz 0f+y|}45=ъs1<2['Ϙס0LOɝZfm2|=gIݞ־y?n9L?R]ԥ/='Ff7PFVCqf#W鹝fcG5a8ԑnGs/5- DsuC8џ9Ovt3Gk?fNƟj^ /j➂ư1_z>5w:k-;s6ҼM\;Lԧ:]˹|y}B>k9u繩}ywyNWul =#xs}?WoN7Pޤs~&x~98լ՜ D~9!44;Sm\VY"=ca:!,sQj3ϴ[26W#$gW'zՋٝz>׻bg?Nvz݅*F u;lunflz.sm/dφ|1l]΅^ v>Bk}^|\>u=}>.׋闯9W·bjYVSc\fI$ڙKd 6sf޾t fίz& -C1[g-*}ɷ\ Y墚ٺyG޻&cȲf\:sМE Y ~YOPeG5MASS/data/coop.rda0000644000176000001440000000165711754562034013426 0ustar ripleyusers XKLSA}C nZ>npC"(L.H7, +DܸaZf…b?HQ~Q9tLdzܹsΙ;mRl :s|Q8eO$\.uqOEII I+ΓV-&tnq2X(霑ToF`sq#CE0+i{ݮt 
vEґg!B*ߎ J)9QSrDMɉm0EO76}A_wzt^͈z1&R:s۞x=Z92 &V+ K^:qWL(Qf*58%*xZ{5z-`rjXbsK^b1{s,dyȐ'ÞF!,>7ϦgvK mı2)0%y3w ߫%8_Oc4a8y KGVjJ˰^WV*~~s|wy.g`Gs>agx#k[O˰'Y8)Z^CWIqÏ?e0 ;"^'}u $śs< p/n8o{Ө+ŊGUd~xY7xcO_+03jU5ΓyޘWD_;3vgCJo]Sz!.%:9Sozcݮ_kĝZ~ԏd%CMASS/data/beav2.rda0000644000176000001440000000135111754562034013454 0ustar ripleyusers ]HQ67wH.EĂ1y#EQ"XћdM7f!+"> *B^EEXMR,J2"<ǃuu|<9?gcb c˼E[ŇRֿ=fc X/c.k`|Q(Fl;At+07ax ` #8.g & J` v6 @ Ab r M r08z8ʘlAk i,eA/: leC/{2z0)Y  z0 J:\lxl\|XcӴq,c]|禒2~O}=. 8&+Ok}`lw\v8*Z~IxE?mܖ5Vfsq{H@7NX˙3vߌK뱇}OW9 U>UL^OH\z һ@fkH*sJR[|j *ja;JuުsuZib~JSߡd%CEK:TmZd0NAݢLcsrlNQg7iCq m3nͩGS vMASS/data/stormer.rda0000644000176000001440000000044611754562034014154 0ustar ripleyusers r0b```b`b@& `d`%EE @.Ht\ tЛ3@p!,B'Cə@0l})~<4*^X6"0r<[ g:,Am8"nUg 3w/ԼPx灨O[@Aj'T[4  w%ּb C@ař%P(%$37݌bP\)%ziE9`ւ Z54MASS/data/Animals.rda0000644000176000001440000000115311754562034014041 0ustar ripleyuserseS?hQ4Im-\,r:S).mjkiD]؈P\.*ܝDuqѡK&!.#$}?.򊢌(5#~⨎&=74E8̸7f,_wŗGspƧo~Ħ5]Eb} }C&޿KܯI<5/u݁߫9?y}qPQ5 9ή1zK ;7MihXK 쫆'Q⛿;$޿s8cH%-Kȹ /K}+e]Kj8iw9;AVW;us΁zGY A`f!\I=7\h)7XB_M8+ m\;d&͈z4612YKYYQb@ӈK|:Zb-x@9Vf!p5]Ime.JQ Ex~~x\s]xMuF0>Nwn7 s}noƽ?Bv]wpw ]^";\?_8ɛ < Z8DgOyO:~nok9~wǸquNǩAW?Xy0Bqp̗+G!?u.8N<~x`8a(v>>qz&a^?י&g9޼nqOSXt=qHyf| [g_ }1?nENep8rʑoo{F?㜷xx?ʧ,yW׌_>~u_9W)_2 \?˜+y:GzTg2W3u1sh~🠸8w7?ăŐIăryΛGO3XޓDZ=|>|xa9dw&// 1b~b:Nɽs=5Ḻ9)S?{$}}g0?&O>{-sp=F|byo-y?T';9/^rN0.W|i^wG.8~v|h>xEƁ4 <ruWΌ<եuK[.?Q֗yFsAi,];اr;-.V2̭֨xF1 {{Szx® Vѯö.Y1ߗ xéu2Z v|j:ꂽqi@n/RE1X?ZbBҞcnc=[|0j6t~S$-Uqd Sa|R<ͅy)*ƝOccX[Nޟ_>>u=57\VO7 CHq}Yb- w'd7ѦI?0?u)znvtn ^?t ءw{9bsϕQصX= ˭wi RM o_ Ր5Xhl.ĺiӥ((j4R/NS2}S1 3mk`+툌31Ӱqq!Wqۑם S6͍X3ݓIA` CׁL Z|1!Jz/3W?m|;+:2\4+,!Mctm9'R-H<JXOTOs~yr?dkMQރAK0ca} v}'1`xul 1==/~s0C쎃ST@޶G@ܝo^+1#ث܇W×+mȹנ8U 4TlڞI̍`=W`!ؠ1̴mF|! 
q]b}= os*XJw|"/pC%8 B Ӡy>ة՚z!ٴj3=>p7$tf1w {'!Wv֣^w#pnߟ-IROQaW);"a>3 4c -&-kSkMeyqh#>vlt\N\Z͆8ޅX7Z/L<_SQev ~lз 7[QJsR}e-9S> ,G`S}Y~0~l~=I)1NN6'v_Z,׽6}Qm!k6Me{4`RnS  k6,x۸k1ޥm5y,!Γ!inWcɰR:Z$wl|hžQ0Iڹ9/Om$2rp*ŗ n~\LVq,t._ yC^ I,8*_qN=ӵ[}@brӥ{Kߓ;X8e_YlHr*k^'3r fכO3*krQGƮnKarv$:apx)֢jv61~lΗc lU*u=`rjZ[B,̿2m.C>"~ZذB|G'u 1{ yV<~ ,<"˚7Y™#g~nXAˊ";_bH{_!—[̖ i_.6H)?ݠkp;/bl+Oz&s>lu9y)`*ϮG͍FE׵:EvPs,I^wR/9ѐw³ |~jFWq偶V _X_R?dFouOG\+n3t];Qӟ4.^Uob`!cnB\w}޻'HP2CR~/Nm5XF''VW q^E jߑ57&ʌ?l uc6Vfd p87|zV{*ڵQܬ ߺ?ikCmz 5x]TEj@pUH^k]ZY*oPoυ&Iӿ3yMYFfS[~J:um|+V$_j6N(Ίg]n[}!%u.$z'5&g莳ۺ)A۳>踶ÆC:h_K,]ih΅<)}U)\h|D5YN .9e|;|=_bqtPҎų)ǿq56֎J`=6(|_m:sAsTkv}ȅV߳԰ր{Nk9[+W?u }o(5v-P@kLSG_`LJ]V7irGgKH(|+|!zɬ(G{Ysm3~X>h6^. 8Ⱦҭ'pUA-ZQ,}MWUzNwvlu&ȿaS)V{fbpR{J-WTքPs^< .bOEW~ t2.hP_BNi%`kU_cr(Zw`NA9b*;wŻ8pxwmf=f~hnya,o2.}Or߯6{?֤o-X#y镗\~_z_׭lnm/^Wiޛ&kn{__ޱܭ?A#eg ejEMASS/data/deaths.rda0000644000176000001440000000053311754562034013726 0ustar ripleyusersm+HQ쎯 b0 6i->ذAA"1Mb6&5 xŁoν真ugSnJDbٖyͯ0KddȰu-w}.;<\Ds[`.7?J_D9 R'd:N:~Sn~SE{7mKe82#|2D2g~]I3sڱwI(g9w2_JJ1h/&|"5-MASS/data/Aids2.rda0000644000176000001440000003674611754562034013437 0ustar ripleyusers ]]ET@@"ғ{==!$PT@`7B&D<;(*bODaW]4{WTDT,< >|?kp)߷={f͚:7zD7q2? 'O&87m'\|ʉg/wn:0! aC0! aC0! aC0! a;]`0mt <^ pp0O4h<T\xl\x3x/S8X 57x8gƉsn <+\?6gnGڿW[\piՀ?ƫyiKkW7,$+y,}qd.璀W$!xMHjp@y>CO H{)AXǂ'n3LEpIc?dY#_!} x ?YLc /$%ws>Hަ, K|x,y N'?/%x9G\x<`o}*:'!Gq{J! t^ydƅBPݔ\&<}̓b)x?KsBΒ>-4N3=7B&w*A>F̳NM:Ћ $%W\q)thpv))/=wӓ/uO 9Oq)o ݛ X,8k}.M!ɿpT;Yq|O.^j/MKGcҋgB8Bx!ߥ8?Ϥ0#=41k ;RB'?m%lO MRH] z$)iBRG ـnU\{(/^ 3ةw M!o/|}_ir{E)x3ҎC!Bex4Aɳ.x,}2 ϖة g_R dd|18X9tv9! 
f+' 4p~{EfEBO+A>f8 ˎTIOzx7>(g1=t[;~tRN炬eog ({p^Ov-hx/a{껜<;AnrCd72^wʡCs< ?A%4ɡrg?1szBc :,Nȡ=,OC95oo1' îWqsud=x&ϐr >ˉk_#_xCPG aN>y;U &-`J쿄_|R씔w_d/l~A~8)u[N͋s5GClK?p\B_+tIw ۸'2Wp _(k3]iyJŇ,o3(n/Q Ogt/d@dzBWt/y߭c鼐 \&*c/XEuTYQF>^#N9#MjN+ĞD*ؓ U+Ѿ0g|٨~z4:2_C*cMY׏zX/uIV=?ίS:BOЗͺlq[]Q>yu=+ /#L/V*fW);*St.3ݘwJBsɳt_Ѓ;O^%Ɵ2w}XyeuEQXv"Cfֱ:|*go '5o?J5F+j8tT\g-{h6]_?Y9K%@Β%KCqW͙_y;U k̍B}^9Uڙ_f~~NW2ƀlɣ p[a>8[& 0IsVOϷ=RcA0"k4 3گŷ0c5'7@98Hsj1+s6> gd` }rstYӁws27IO?nX x}Gj,K{Gt%>vV;Nc [ЧHoV1^`1bKVOsq݈>D".U>1 fʸhzu=b SCd"QJJNyϏ/Nzʗ}>^Vuxz+Pgg})RS=t'?RəWd>9eO͔\)=b,s̭2^ΎPYgj2 ˔!q.yqR^Eh9*]|seZ7Ɣ-|_yvq)i`s1}|ܫvTG~8Qt~<+{Jv5/4! |dO%0 ߢB^1n,,ę-\QcOv :Ԡk%^:] guAZfzw+?@j7jnt}YbZO. tX0z'yaߒ'z]! <[k^]،f[@%Y|Rw]ɺE;`.qRESN:[OAFRs`m7:uI߫530ړ/hָ͍ޠ53-N'ڢk}154\,Ѝ[|Cdwh 4̠|.eWw}؊F/c>uR?#KY6":ԛ$Ϟ:K$_vͩu3>Oo L} ~F|$3l k;9쐯۰t̙SϲA}~`g^+knFzzgt|el7yQ|11Lg87PX|2ާzs"'x(y|:sY[`~iYCXƕ\5(kߦXx̕065(^vyZϷeW_`'9kc=_ɁK]0D 挧qaw|o({s+ޫPW2<:e('̑!>)NR\m>`_sZuv>˼Ya` s~dOOA_ tbn`9|=+s8߂~=s;[ħaލk.pnM%DXB HY{AxYjsD5/tQy;>0~nXҦY 4-OחW+?Zx`'sPc9Y|c\㗇Ȫϙ0};ky?"?>߂MXӇx98!߮Gt|!=Yx:?-ff^rn~5e>eN=;C^4sߤP1ƆM9?"]O] }|I<ŸΧI 12q)bQ(h4 5闄.h{ڀF}>-=%}^&}P3eRcuey qZ^{fX#>4nQC ڒ!K_*z1Ma+.%1c͵ώT}ga}-}w_\.꫿U}fT k%.Ae60qn)k실,ژG|>7zZr29 FA{`AߕD:{~>V{Q!7u[ O_Qm|ZzoZ>/s:L/|{jp>#%Ξwg$gRvo2KzkCO#swa/nzzs.J0{3["~&uq߻/B%o:4֡NRbS'{˳bg}, ulA$x7XnO@4S =7ž<"qץIw8,Y$$$WK'ozK>jHG{c>ktgɚ |%kMkR̭xL {)=?sT샅]Ip~i1cuǤ; ?$V3e/݂g(ƄgJ3z:{#?SWgswu9ccA>tp_55 瘽>`#yQ|Ƥ/؇r[oOA+߇JsKؗD 'K:=boy98{_/@Ϙ|OQk5kA0>J ?4w-$N{O}煒H/WzaHs*دYSgIM4%XGu[⓱Ynyc>^ Xdߏou|G6Ύ ]s@Uʹ&Ɨ~]~IK`_-`rVרJ潴دG:=YM_}݇>br.lKM>1{Kr&e=!?5>wy_x#~=081/ʩ6z&'y L]OW;<1BGځ=M>De221ObNI`_;؄|tO$+6F<}|57 s.:ſFSԃ7;o&a}0k9FI?c ~"~o6gEOu #ٷǰϏ1*]mZKy:NξW_cCiLq8Fc^ʈg`驆^Jlg{b .g⯧G@/:;^"܉/κU,t,{v}"vԝ<3)U1^d֨5MfćOU@y-`9Sn~-tNzϬ$gQO>/ sc/J;77ctdOl2>\z]>sҒ=ώx5@0a/%MqO_}nWf72ȼ7F}Uy={xYa^RKN8TYAm?7<&m;`5;I\p;P2Ti׏5u^O|6= 2ƻ-tόU7o9R}"j0g_27{~ˣ{ѽ cρUBExÏ'T9i3~4;:-|U<&̾|MǞsLa}enqohosw_ç.` Ds;z۸uҔC;0O?xCs=%׼ <[w>i.kc}\L\}q6ŭ|ƣZL/7ї1nޫ=lG_l_커.?wWG xf=ΐ~U}3[>;l{j3Y=2~H1yf9_'LgL^3(Zјzn_`Gjkk? 
{C[ꏑ:`қ¿Ӓ@o6tXfMq]> *]tƥŕNG^I=es Qc ~nO+k ]_\b| 嬘3b+OG;78ukgnO)cyJ,ߝҮ<coWlkj/߫&/[nw[Û1c7%+-Wƞ6yw1nyn&{Rߛ}=lxWGT?mW;yּ͡gx=e5=֩>s>:걦$7C zZ n_/m!܇Zȍ1\)"kwb 4&|E՟gV -}ض/xlojTz }ߩQ_kw^YAԠ/jwAc gRS\{VcƾKwv9{Y%W~)Aww`XoK{Y+l}-RCݤ_ ' Qt;E#w܃c-GH^|/J3/:/Ir7G~֘p;%^O|MRwgݕewco*%/ևz‚+`uMyoe[XYs^} 묗ȼ70T?`7o+vx6c]%{yƌMd?/l&usꏺ[I݁40 ,7;JI| dpg-̭%uD~ֳܸoܨKcr0W_^Zf2o~O;Kt.L<({Qo _+WƓ>ߓ0 }ϓ,]Vߵ<>k}v3]+",CfPrɫԩ{S++{ebOӿv'}*ů@ex5F߀!iWO,W拘,2E`}|D}z_>Co/a-tlyͻŕYCG{UZ͏ l#>Ȧp4Z.{gX'?i/1{5\j.|ͿI圬>pJuǕz_HG]Wیasƹ/ݠ[\菜}\qNk3s\}_b65L=@3F֧1JHƧԱǂv1h5->ߩa?4P/gIᯱk}ݲ){y_zg}:\^7iWj>sK ޯz2)m7?_`nS6]_ Eң;Glzvh&4|!/>=LҞq|{gO/nj^{Cy]{ې~$/MZ^t/BVxһ޺Vxҭz/C~FDZ c|[?ѫLtv~8J~nznMAδՙ gDߵ {9^pjz9~r4.ζ׳g~xu©tnZx;a5B N4$Gk/{eoΩtNt;NϾ{^^k/<.ҳlu[͠{jcv۽ gnΪNcAl{czO']<^ؙA/猻~o$j3ѹxC3:OnI<{]liw[qF;lw6&7ιnG^eV{^ϴ==^g\tZgYo9}n 6cn}+nc^d^uw3>h ^mk7{{;Ӌg{3ǦBn6 ۘni~qA`<+N/t4w7y^I/|B+41ݾwV[Ilwh .|0&鸝x`pd#&胩'?ag]QryޘƪgA &޷OMv'uXz9|ګg%s[ؓNϵMz=VX׉}g%1 zEAlWt[<_?fGkt͔.08V l#+sq|Պ~;6k~gWz׷)ZmkV쩟xܷ4nۚL9V0:ҊcX|^DNtqm|nӠڝc|j^}/:qP|any^vy~rͼ6V5y6tg?|ԍwzfzLpv.Ny7ߟԅxild?g=qr}:fzgNvls [%_ E\io1>b-] Ufmm]ֲ'["i%'txokīq ׎e=^ixMtlm ^{ͿO]{b$_^3~Ƃ[{?vtD_Iv%O_,kQ@1{ Pgy }׽u,;W0|jlT?/Ե׾Ϲ>ow[QthD ν˵Sm5gRSj?tӈ޳K]>FMns.Lq[K=g]vN#.deva:h=[lL\(Vu9 /SE[P|q,R=Whzo7QśuA3}+} $\JCl<5'H҅ uaJ+\o:w3S8_뾌{+tO>?is=nW~2)9 7Jߛ-*]#GV\341;)0[[Dkù|8k/ yBNl}\j gtcMFr22:lL]3y/t| %邿4[ό_k lT_yJ傭7 &+u^.j9[ך͟N炮+B g~^x/qAuwGuD9z~F_LdDS9S6#6ɉ=~'͗2dcf쮝Ӂ.آ:66e.Tq3m˚ 6 o qDF׹Yz ~BxȨ^I5f/ .t}YieҎsJG&+r\?柙8sF\&Gbwi~G7\ ubӝW|Jkl@G,ְ=X9]q-.ȬNӕ#|{+f8nM……Ө>sۣZ\ȍw6}cZ^5. ]ر{u|os #`3U.y \SB̽c ۙ>GOSgX,az56?W_2x 9ؾl/wBle9P[rs:|'ߋ\ a<,d9JehRl[ll^MMlbr0qc1q`-\/uA#oe9WӕK#S& .>\69:SY,fiњLgEόIK,炼b@s+\i=Gt+l u5&o][yZG:}j5qBbtcpboyl#m_$X| y"'-f191k&ܣ,4|:@]@?}Vzqb>..o\B^3t MfָLn eʅZn}Fy,&4|U.m}O[+Ι~[BL,킼X dz\{ g%qb.ȃ\ZpjyxZn yN.,,Gh>ռ^Ibh՚=ڬjo 6d0` vjf_,8ho9%.؝\,? 
2d9uns5_~~g)N#YG\ꂿi3X>l,Fu;:xjWtqݞ5j.k#.,$Y7F\]g6|9..9.c>4d{󯉾^1` |xlŋss>tťf?߈ dbD5G\Gt\#&&gވ yyiͶl󚾵'p.ѵ螝W[&ߖ˲!Vjt]B/S#ZX6jؖ7f֊k&qo 9rJW)~ϳ6vD0u?]+m_f.̆b ]9Us3u}wm?X]ۅXtd|c:SaXNc#.h~%v1_z"bfLO,jDs ׵.Ȇww0[tߟι 1lyA4o Lifzl7޲񦟍qOH\3邞X5G7}kϬj >St520b5t>9Ymd ~ՄLtq-kl1H\0';3,hԅE V+7a9d5@҅eΤ~.IqO:*-e|E:ng<ヒa1ɯɡՏM~-s .\+\?^20[6]ɔiUm`3;8_>{ˤ7O1'nXd2/<ڨwx𾍜IV3Yl+-ysc+t˦~'W4NR5O9MASS/data/waders.rda0000644000176000001440000000145011754562034013742 0ustar ripleyusers]TKhQ/3IԘJhEbiQljmf"BqQ-BqQ? ōg%.7bk.] +E޼51ɦWqDLGiGvq2 %rPjtPԢI00i`6d`-"sQ]Pql݁F}WD5hנCN 4آA[5ئA5ՠOxx`MASS/data/DDT.rda0000644000176000001440000000020711754562034013067 0ustar ripleyusers r0b```b`dab H020pifUnG{\c4v \`N:.UākD8@;PuBPqz8MASS/data/drivers.rda0000644000176000001440000000116111754562034014132 0ustar ripleyusersm+Qo!d@  +Vl!< 6X@ ݹXH! b@ B.9No,/,]I:&] 1o|U_Oʝv2>~1w,`Y"x 5@MrtU3%_Y?і1KčylSmxeVJ[> {Ž[d?_?E??__C/"uLψq{Op#"~MASS/data/chem.rda0000644000176000001440000000022611754562034013371 0ustar ripleyusers r0b```b`dab H020p8@Ȗp`73 pցʇѼ3A`fK@ UY܎׭\_h|s-X|N|)Є >4MASS/data/painters.rda0000644000176000001440000000153711754562034014310 0ustar ripleyusersVn@5 N  xC(QPm1Xuv|ڡ̬7i\Μ ÷!sssC{n'x}>9uN=EvsǙ̩INv`&l>#~#7nH:6rzOF ܡg74qǴg;ኜw qAڇX;1xM;_px8'N`Ac'ю h'9~Gڟ7$@·K%}}<{[H2!咔+RIitdԤQ#Jm@{m+s `xw.D)BW01Y k+V|{gxQ b#jݩ9+E rŋfp%*љ#k\[]t6kU΂dJTE](jxK),ѵfrLY QOƠsɳlkRkmxiW/dr D_13m6A^݉ιu+ +nڎTe6g)T6rݙTӣh` OR1}+~Q^n5,%|% ?VuFu);y=ZOGryU3o/]vj MASS/data/housing.rda0000644000176000001440000000066111754562034014134 0ustar ripleyusers K0dzlu:t*!>>( E&:da~@̬s_>A>Kzפw#B(\H%EH΁1Ӓ^;h–aXW:Vƴ/CO}]ZVl-a}3+PtH %EbM1MdO$|`Yԧ@Xm`X1dNGo 8'ExEG۷@ s9\s 8 [sb,}tx/q#?樷My]} .x!!/5D(9(:EBl p3MASS/data/mcycle.rda0000644000176000001440000000127411754562034013735 0ustar ripleyusers NSQm1ix 4BJ * TԊ'Z %1yCk+|k{}3I76tګF3?&bۀ1I/N[1e%sᲉ:OlMe쫇]o#>Nqx33$>E|<퇷+C!}+:Q!Q7G=Esbw@|"EK\"zExKėC,c/S$~{*yW-"|Ogɯ׮:uѯ//QD22}(~p W>7k "}n~0tz(_~ѿ2 <{b ̫y zy7'M7_ @h~w.sg #${+|G\S/w@=.sNL6cG(_(#U\YK4jX}+Bm,9wOױ>h8e{|`I|+V+Z}TgQ1]_[y'^}~o6!e 
u"n\:Sn,`ND.W<qZ8m<+{:إӸ{nϧ7!77a*Аw׈_7^(?x$)gCQU?&BGGB5/17>H2,(ݴbiPU\)%ziE@Дs =_ X?MASS/data/topo.rda0000644000176000001440000000075711754562034013447 0ustar ripleyusers eTJ$AA"?@6HLAd5ZeaaEXv}) ʂӧ꾪U]iLEcg,>Y1A.6Zk-cVԢɷmOqυq5qrH/xr@} |_G~/61?7Ɋ_S<'ةqV!Cʿ_qz.ZTqǿ|Tg+XGQzeXW=zbwԋ%mocbx_x]Lz{>i5A<{ 840 $Fm~v@<m%r|=ׁ68wg|B٧\ީɭ&+ XafKmMv\ɺ2*'xyVnZci^,^ĹcMASS/data/Pima.tr2.rda0000644000176000001440000000770611754562034014063 0ustar ripleyusers ՚{|ecNBtiY؁5Q**jffjjv),+39 6irDʎ}?0>x羯]}y7biiA$I69)ztګ]ttt>=[9no)VrՏه$ֻ5i\tpOt:.~,Ő8S=޽ꂤpT(n:اy&{cgd=]<>Ksʻ[6V'7WWbu% u~ exS|t uˮĒtS= ? QS]z:}$xJ UNg=-HWyJGuR&{xieKy+_[VZ}V`m.uV>jQﴲ"+%+[+\g Žʧ}+/XYFLqVnNq+O[9ܟV+i+_ Yt=>&'+_]o!/~+NiRoVb갌p >_@G[nW$1]-~cr`;JӱwY1X.2ʗl ;o[`] zpk1 ycy2[ b_ȾRWvn%\Oσ0/GL?uܻQ8r\/-B5طc 8ǝ߀z~ϰw?܎ \m4*TY\ >nJ??%r{nXV&QW^-u$eU~̻&Dpu8>æ_a9R3 |^5}~+1&SCRJ<>JЍYĺPًczUAΎ۱ՂSTS Q||T)aPćp%tbgsa:quq)Ʀj/*V*W>xxVYgT2/oWwF&y8w bpϷ9m?5~j8tkЙ)A)f Y$u&ANnp\ o:/9鹼5Y90+ídZkeV>d\+ ;~^*?AV2,b5 ƭĶg~hE`9u=YE/61l0t Wza{yA7|COtF+(6='^_/=͋+(|JCНbg:~UA.;n{c;8 gS_=ڝ`8uZ>z7xY/-kU8_-q/?` v:?V2&d>h~bލ Eϥ/WbCbW~;z3+aEF e#\~,߿2ppJңkscQܼvC˅ ""vcy1x#.ǮTuy^x)U_J]gW'}OT(oSR. o!zo1z巫{1vK8KY/P=1Qa'c&CL֋X/~1o+m+8'e<뼨,>iq^ODy?4ToտLTqijI~:_1L4K[ƾW8p  /;Y1;;pW9Thiڞ'ͫR-XW_]1S!_H0{_joJ~ŋk>V%O_u/ჟ ("N/a+l{~**a?B_N8Xy=6?U?=}tGFg"tԗrHs5هnUWb:/cAV*_px<E:O'c_s =윴?aχuf{[{64ܻ7ͬr!o,o|4Zw?MQXEۏ}͆od7ڻ*76?wki=\\6 k'myujrl5-g?g6x/g^`n0 ~RlQaxa*]c\O 0?mxR4_5gMgxl(7,C>5c^ӂ_k!1fCQ|gy]<.>jrz|ӢP~`ɡH6ۉ4l"2Gyh iiy,$t,̩I;òM7M ϯd~޾jg=!] 
k0 7uU|׎ך0k>^d,z]ż4!lf+6qJ?[g\6 ~Vs}8ᮤ4[sӌ}: φ g^r솯+Û΍*F&r4_}Dm:wom?v4id;7Z:Js>=Ӫ~n2k~GnNo-~ L~T#By~臽ΛvN@m}$<fe~53g%a9he'9q_N7Q /4gXߚ9wob>oe9Do<X2G]>WnZ//p:o?/M>;\ ҽtE;LLt6;KsD31A^ϝl^]^<_kB}?@K c߽ջLGVx_^q˽x*{o; Ye{ǽ{  =.c.x{`;lbK\r >^=Htyx eoP'Nϥ_Ɨ7wr@=ۋz =ċ=2HX9H}^$p.}GN1 ^/N'?hϠ.gP`#A EUD#ge(71HqV$ q33j5toxtlD;q" q$gdqNGG>NIGO˩Ԡ#OEDx>;z"9Y|Nv}*p*w:ݩ暴,nhm\ogrł^yXE^X'urʢ%KT+\sp\ɫIa«unng_Y'e/vW;disg/;oEY:%zҪC}' 0MASS/data/OME.rda0000644000176000001440000000456511754562034013107 0ustar ripleyusers ]oEiB$Y@B[D@ n)4RRoˬ6{֕#}{o93ݸߞvfpmK]~g<J;9폰:z|qqPdFF3bɈbj`0!ND`&F| Fg3Z| B`|aF\4@ڈKqـ+F\5#7Hшk?oFaĆ׍Yu4ɚbĒw/bK&4IWM:;|]֝DS^; 692mר_c⺸NkdX\.^qKm/bNjշq9 M*,ClӶF<Y3\Pޛ'e{`~K9"Èč{W|Z~ 9=6SQA꾠1OW4TfD+>EuBmyEtXշ#vx3t?Md-YR=Wki1ҪOJ}=)g>/a%_r1zWZIøXlۃ^07|{{ꇬǞaW؉p?H?fEsI່M|jux֫/3rKj<.=ᅶڡW<ϰc8_ ^u¹7'L3q9hG 4|f/Bsd1Ǚ/XGmWjؠ)>t})0bWOD}RU/̉&uu#LM5Ww8?sLҟm>Zbփ>6N7siG_2>+8wcU}>k`,q}{$_p㚪g%g`Y \X?Fx=oY |g%1lW/9YΗ>h\Ⳓ~}Ury~Np8Ϳ֧?h<<:`;,_=8r[":U x^z r֩I;sbBtXS` F?s~ԳߺA뉮Kv+n/Y"`9'#ٖfm|k1ou.ZʸW]keB㧱vO#}ͷVS}0OSyyηFUpaLK.cOȕ?Wu9I:wg?n̗>jh ?zV5˧\qa8xk4Fc^uzvTŌf_Nvz}#אTlgKi7\ZQm%{1'nY ךLCL[E_bMingzV6< ]@)+SJ/C6]^W7wѧn{gg^zl}ep};#D!XJq͘{GzyMASS/data/gehan.rda0000644000176000001440000000047011754562034013540 0ustar ripleyusers ͓N0/㒄BJJxVSW+P)Mcʓl[J#"ud|/Y=uBD!ܮ̚yb#^6[APtf'rQγ\EZZQ>~9NS_;0us~3s\R*˯n5 _oS_YU^cuv ~X=w4z"otG!wDz_DZ?˾g̗)? z̼Qxˠ?T1jI/x`_^ܗ|ôC%~m/z{釞]Aj߱β?]zO{[SD@'-AU)笣N>Ɖy;{^a\SY t=-*~OJGR{B>9Go+ +<?J)G;lvH?莪}=^a 7ʱ« ~%ZEyHoXg~U畾x.M?Om\WIEL>)A?e{:#zxn*"eƅ뼗 OC >_h;Zs_OQ#*C[#N@':YꝪRg⾔nq?wxh '/룧x#W'''(_Ы*hyR{q=&#c.)}h4곬뵂m4Eǚǥ7%# Ho\4O٦}Tȶ gV\yc'^g&rsZ nLW^eߍ2uvg1zj:XZ_ ָ8-N)  õ-y}>l~v>laT؎A? '_E!:e[t?9L ] Pw2m t?>'gϑ#a)'EU#/_㗈=E2KxXf0b(vMF^> dy̟p$x.~_4>||m2aEAQ$/0-B1M`l8uOgGM%|3Ga&OCx4+%8cgƜWvNb;HҊ@ڊ[_LvW!wZSc*6`v —uJu4t|]mցvkuFKPOՑW}<;#Ƶ tzdw]u^/5>k3[Vk~ێl;͎4Ii0+\\c9*j|u<\1v䍽*Lyzenu%x!ORMg C m뼒؊j%CĞGgn%ue#KR4NۺW}imer'mfTs op0lt /7[˹>zo=AUnz|ON|/~G];z;y{ó:M۞!OggڳkoAߺ`,? \ 0n|Л!2?A;n |S>?zȿ8OZ丮]qi]C~z7w/ky&ʗ||o~o9n(f8]s,x<;=|īG} xԺ1_k?[ɿM}<^YIDz뗻J]Կi~G~tt Ɓx\yWG|!N/ފ_. 
!k7_u[ό:?jr$/{ORh?qQO?@5OἩ<}uˠk_O}__5vޏOg%.-u~뚺btr'Om4)纭:yn<ԩ1Z9e*{ N%7+>?4onvn\\>2gფL?:eiZrN:YعR}ܽ~_x37~Wv<_Ϗx8;[ށ|̗cy++8ؗ_sO~W1~cf>(g*ߩ_Tŋv?_/9r1w׈8֬FƑ]7|)U/̫&n 3TFyw!v?4\'dόo_U7eRRyQ_ x ^kC7M_N%˲0s{|Jz~g^~R7u bqY_+^^[ i&nShL?MASS/data/shuttle.rda0000644000176000001440000000070511754562034014147 0ustar ripleyusers Yn0`411o#f|ӪM -:n2W*<8^5Gj_a4xC EĽ91>HSGQ6zmqKD`c$EW¨3Sy v,i=FyiLʆ R)0l:l6, dCorVf7*urhW(ov\ Ju]S'H }WyDc,;RtA{wd|eԔ5\`'LA';*S?6r={MASS/data/cats.rda0000644000176000001440000000111511754562034013405 0ustar ripleyusers nAE3"9& 6vd!clRh4BHԾ]Uݲz޼8y8i8"UoTۮaϜߌ7=^.{޸cxo"h+/xmFLjl4?Օ[~^>s3~ ocs_Vhr=OG.Vaq ׉cqx.ۯՙ[oYOپuż):_F^Է*6oQWކw!77E~NCMAno]ⴐ[KKw7oANof_(,L'!OW+^IN\+T/>~ N}WL3ȶB>Q]e|G _݃ L-{>t9>ggy?+C2}8T~Qe:d(7S|LcW#pOy^, C~SxevIs-_eƻl '3įNχ"h8*w MASS/data/UScereal.rda0000644000176000001440000000502311754562034014160 0ustar ripleyusersY}p?a08&T$|wcá]0&ab2YI3iSBa eHHCI:iI.hIfh!J@Nz{Kz3jݧ'i9ks].W+%=_6=Iq&`7. ˕UZ?m#v<.'B]7l}j)22^A2q~|_ϐ/ZsL9f`x #>O{tCU'}-~ytZL_x'[c曫n堻7MRlR}nt~[ul->/OB{ϋ}'I0OÓY Kн=TNZJB>1Y>? 
+nTIKlf|3=9p̉F/>0*zmJ!ZѼK+Sy/oۅ \V}o`4TI@/\Mq䥺œyy5?=qCA<wn{NSmZ2#ݿȺzppNu:dN2k1|'~갛g '5v4R.E:*GC z kox֖ ԡn_)'Z;x쳹h= {p7wjhb{*٧QcR _ȗr-,%?*Ҿ/^qT!$< EM-5SU &04 .# 0kT/!`/JJ09a cJvl !Y5ĕLq L\9㌭|R-!FQ~& r{L]jv RrgM8HWV}MAIe "ʳW bMREn@i=0*BU RB&&(+Ôh󼚨 /(ǫ8,A[ǧA"*Ĉ"Uduw`:e#,"̵%W5 a/iV4[ E82ݷKѵrrתϫF|F7ʛ1?VE AZTin㻏ʥt>ἱx Wy藠ǿp1t_>_=j=)|ŝey݄}^!&wv>$rKggg|$>z3>_.'|XUN'uohqNRW"úqjYjwڬuss떬ukrKd|>R.yrz=v:7ui_4>Q>.zERKA/u?]w":?>Ժy3%|_4֧}9 _`g{£A NEκxdpNhqDxhֺ p^g]G'y "zF6=(|ўyg]}}ЌK?|8#rYzJ7'|"<<=~zG<* D_Q|9 !Z:){J[j;>9nR7Mg+somohmҴv&0%Nʺzͷ/o5{ʿפl>}=G_}2qO7A=ڌWy"Fsqf|֠YFMhZ٨Սh=ERI}E}EtT~v< F(Vvctέ> tMASS/data/synth.te.rda0000644000176000001440000003620511754562034014237 0ustar ripleyusers WQ6 (6,`A)RDE݀ t MD(H&"" ]$H'/wouvf9}<ѿj,xf&f ;vvi/6MXsnֆ0}K^՛o΂@(+E @Wl\Wc\GrФ~Ѭ,SY(CYhJ-iP|1JN Sz{Stvm1"]@@L4MTtPVD4;Q:bo?މE..hoѰ{M~ERFξl3kV|؃ɾynyb& UXy"hLƞhw[ .yMs ;S<7yn:v(iN(!ۉCkqE%dM'.xlaLN!S^4Qaضꘄ1W2Oc~qJ۱G!Su Cy54fToJD ֫&FsAH&-u't{D^t7BS!?dT}Q~)"7!F_A4%7A_7.sDiB{^D}[h(~s(ʦ ĵ!j˿l4[1ߔ"OqYG4N~UkHGf\}|>^|/b'P/DR`&D?$}A[]E*(5DuTC >BwW_ޅ樞9t4y 0G{0O |4ͼ& mSF3G}F>BXT843d‡(<7 bgd=_khrJ_{|2mZc5H8}L|˞d ")U4劉"Z>_4*1ҧt ᇔ7~5"sƋ=hJb4 l! \hDˈ uӝͲ}PGmIwش9OwBN@ W/0輖ۇhQ&6:qON@>-vP|[ 4T#X̡iNQ3Mͤ|8M2_D˲OM@J#x4Z#C}:ht {Ya?JBJJ">M|P>"ӁHA Ldts3ws2FG͝Gfֽv Ɣ \D_.t@ObUh…K.Ёh&G1[ѤF+^Y/ls MFe#m 4fsnIcG7hj?Y4ܧc3b&N">+g{R?E袷^AL4&ۆ/sXɵ_(ORe;C4D湈tz]m8n_@Dz3؅ǐ;Ot\ãY}mx67b%kiV#/Q_ 3f|էů]PQ'A84ζg꯰L+ha@Fp5Ƥkdfh0l+nv;b6݉+~- C;)ݏJ]un"_ѤhV."pM f;fs |a˚}yz9C}!z՟nAw4%(-f!WwDlIOPor:QҥB2\^Ȧg=h&ꗋhjRKE3=Z_]X˯f4'Q ݰE4q=uSI$_HFDz_\R/ QmLJE_i;b1Ř]QMQ(qsq~k 9 x;Gf1*jKXǍ 2;qxn4a'/fm{9 MH\ߌah}qTP}jJ.B\en;>( VF/tE c }4rMWTk:K`>} mER=4=0Eo/}CIr_\X) }vƕq%QU% k Mfq0t{LgB}E ;^/Ss/`}9oUg'- :Hh)W󳓪o8P=$xZ4{|A҂$|ULn0:u 8h?$[MH=B%T_B Pu[\(sgP]Ыu#śo!I/b*G*4w@|gg@ (+,3ゎ\1t諴 Wn{9 "w "j5xrw8,7#_%45bxS3xO|91zh12,P /j*ת|w?; XMbCOA 8Fnu=`A[< z n̉[H{& `-Q)yio9DtgpVu5m- 68dhIλgΞGA_Wl͓]twAxM2hhu;w|F5yʟT{TK(ko`wx6U=kv~|q"Bߘ1rO~ }ewM . c 1+oA\׸ZhҪ;``vp7*ߟq%ڸr<+ w+PT#hy_9 E~E30l+ È7'ϰJtQ M+0q,. 
; (F~ѵ1.3o]q0&"p4OJϚg:) ]xnwC5yNTx %RphMNևt 42`X$'^ T.OS1A+k2&u ǠabWPYh12Lfm3BsBIXT/@ęEKIE#P_#7glm Қ-G#Uw !L͖/.~L爘L^i5iF]SGCgnעĩyt!{aG&t;QYMEзO$akGQ//b"55hnۊFy<0nƒ!ZFU԰?BhU\2>pj_,7|l7AgOn4‹1%G'&ICP!P!X'/j. UW@[r-,e7 %\AϔKGes]GQɁ=sJ^nWyn + TmG6!ĻW˒Ch}y8?VIkrAYO_m;z-?꠶2CgCnIUD N*PTF]Yܧ\hFe#Yf3HXڙH`o'=k.k,ˣk(Ibym *8S_BJaFiKݣdݛjdPz퍼@4*%,  hp;gFA: T:j3]Y$eJvWIA=+;#AGkܙ059F)lJT-^T`;>bR H_P&_ 4·~~TqC%/qsk_#`66 <%}xFZ53Pۙ:$ne^&ztc¼/{qY=C$&אj{P͋Jw^\*6BK_0PO1Ej-[FXњ1P݋z)*{1cHYo+? .UGw`_;Z=H:|Qۏ0a7r{р6t6,Uhq!:mo哮z<$ I+y{j Uﶹ(Ac6%C? #Ih,!WD.Uu0Qj D@6>]"Txޘg1܊mQݗНu4ԼEDг7RGu<0p澽 ‘e$GP^|YA9 WLݗu[nGm7|-:ߖFfD(|NI9Ma.hTd\k4vRmၺ %-+􌔍VnKQcvM E={3쓽8KF>~u)/uAm_aRXz_A 1霸?Z=ܯu=fl~ՄHI-z\g@9h̖ߜfOLఈv`@{ #s"B'4 CY?NV &}`7N FDyׯj;9{si#?Oa4IW\Y!O0s$UJ7+1;DÞ+By Dlot 0wL(" dث~n}f`jqQ ׬ 9MΛM@ Y Gg""wLˀ?@9YC+{*h-&ŴQ/W6_,4JO]nWi`®QVmLO{=`NpXI@n0v1/C@J)%}&Շq@DJ%'Z_WCOQ' Ho_|e< XڟV9 B_PL零r觝6a W 'c>C:\[_g't]d; ّ;$aDt{¼%m|0f^}'Z~[/׈6 %cx:\?j4ZxGRv'@zWZ%L>0{p]lZ #o,a+qu[GCmvc,&>iO.hW [Ԡ'x03͛|M.cKP S@ ;]_>KBۍ  i>}4ۓD`f_zPp 9fj<n6:ޝo ;9{BQ,t :_u' a-: ODرOB)47?}Ee0(!3E¼vQ*P'6}/ǚ~CGH@e2.÷,'_7W0+T; czT9*lsNo֏гu5 j K0xqhz  h1/`퀸P0|WN{-_T b o^dhPaE(_ vr\%6лCx 1ߒ^ 5? ɱxД[JhQw*҉F^͗]0rg'6Af&(Oyk{gn ]{Nn_ ]? 2~XUY̙l{~ilVSBl!7|j-*=bqHp>Jb.O0 3PU3脪b^}W37ëw -MN:sY9-4*R뚇c4\6m=}l4'20eB-G:`bJ @eS:0)=Jf@/~ю:]{Ͽ`KsB H>)r` :BHr4^R9" 0} -W`ЂS_( ˬ7TFYX/Z 5fN_8ԏb]"I>Rwb|_}ߩu~}!o\+f3:j xV1gi/vCMUvwi]N.{*z_ˉGrׄ%nz]ӽJзvEGHoVՓjWkXgjv@wԁfݪ1ѕ>M.kl/5y㓚J*āj|w%@cZoC)N3-b?2AJUŀ]uڒToVE(6X. (|]vWJv?LZM3#d{]nB0xq}ǧ]h/= ֝x:L7 As'" bD~Y&;A=^>wƎQе8\^1 o 64Ȝf2꫚E U:> .;wwC#G~q@h8 Ua"eKFcP4Bď |Fwzԫ+/'`mXbfUXE^5*Aeglb,ʹ_vj wZ@񀤖`chIJfKlv+ ~j:CW&/in+M~N[C9+cHѼv7Zy7^J|"EgR#k񃘆`(Y1 5j>K0u'4T=g-B*ŵ@ܴ]:LZ2Ŗop.V8ŀ^#((H>#SЏCԀ7U/#wq6Wmwb!ϐWT3߽^t,jw6ILQT TI _0qh|9ws!3n+>=u .:i`/i7G1Wt3XpTVdqUб<;0 v.>$EtO U^D*]3 q`%$\"úE|ЖrK,KhGW߅a 3{|C0w:\7r8ӡ#I~ze@030j Juo ԍoUeWbj١K(hV>ezw@d㦏1pMzdơ@2,^y``=xwOT,"sy;{TۥR\tr,C_;3qK;v;b7H2yd2̭Pٞ\]$Wy .kP EYAZ^sM1$MgaR]bF-{zU}:cg޻sd t!*)y_% paٖG:ewHQ'? 
>[S^K} ^W3 z ާAv,F$wPyxcXy"ǗvMk@~twQѿ) fe0|1m %@E N`"`~Hn@M@̒[P {D>3`#<Dp0Ynx FWNFE3 ̠(`nEsreLYMo u~V0L+ޝtV[`]vo؅ E fUw@17gW[_Bd/W4);N5anUr{ Y@R2S|&}_zTБyA&@~rQ53Sw0ͥ;Á4<4 t QI5^ sK0/yvΎq].PJ4e+)CKX:nYT)J 6/`BEQh %ԓʕ@iI', wo"%mG'9fG+s6$~\Kʻt w7 4\&lh9!q`$z(g8#Ԧ|NVM@m[ f ikBM5\ ٻ엀$ՑIgS0!U[ f,DŽ:„L:ܡs_*<]^DQRMR7&Dx @6g7v*^/h.S0ҽ<L$|ܑxLVMH+ty }VdѰ-@6_SN\hqT I~-LL9JaArwrǗu!G Bkמ€_uN& U:a"+&<U%tJ#a\~ۆ9o__yɖK0q1.&'# fcL3 \_:OdP4W=f8(- jJweRΟ.| 3C w$-6J3jɷqn!y)1 t^[~mA&\6G6 /8 (@;85L,`kNWnxXc<굽 l5@z{$&#]1?a cZMK# edͱL` Ɏ"@0ks:! ԟ#[]{@=>e&D= >&'a&bN2L$yQ eiO艞~ y}Q?k}aTjpρc"?(y!]08yaЖ}cݟ֏@kR-b]c=P[g5ɒ,0[ʶ]玻{L:I L>_꧿ƀ=0`FfKn-0i˼b|@I j:/#:.00}mSoj}EN88DZ&}szvV7zMY-@ϛ\6J;ЮW (LqO Vm@FmƦߙ/$|i8q_lO$\v+2?Z,߉0]YV]W o+gq @ :L)0ï*<\ƱA{aVr]ּc0 doBC!:s7]n_́)<%oyf-<0 ?e偘D;L{`2GҠ_KI'`sK U1kaHzu@o6$KZXsoa* ^򉭮0l7UE(! 6&z p0: 1@/"wG_Fy5m R&<@OrĻjP&Á^L`ᴽ'Xn0;><&Ė#uˁplHmp8{}@fgf1Irĸ)#<6v T-^kn0nUp7Fw0b^/0/ QWw-DMƓwFHyp3 >nyܣ^`n$wXL;֙t}j$΋x.dOP` PV*x0N `6Ϯm0o-:9P #Pz cDiw /Xy`N88Ywa%cyF]'@OvIwQd(k PT} ƯU3aY[=_zêGoۧ1sl W>{GUzKݭ*1nаUŞ-+Vs߃N|kyfHW-T91`y= SE X5 yvGaPB2/Td8p(sҒE@ފE%8*8a,e<XAZ 3v@>Fyq)^ Z\R_&koY[v{e0~Ο'X: 4E`K{Y+O;L5`O= @Uib̘ȟǖ.xXwsշ}?W c.q0waeihݢ.{Fl9oWE<`+&49:)X6 okb~EtLz bB NJGse DGap^]qLYuMy' ko|0<deu2d{u6G8сO+ecF'L5o> sH)IDnPmwVs=0ꤨ鯟`fGaFUI)Cm퓀~[|ع -P WI5ṭYш@okMc7mӷ/uI(:L3{Wֲa,=u+vl{(Kc& .vP $0L]%ʲ8?}D 0(5˺& Y75{SRj^ ̜=ZU,?&6%nJ~`2ɏ\n|-Ł!Ma!/=fgD^W#va鷱@4E>z0L9ujJFǯkX70㯫`d Q/l}K/Ӷop9?c'I6OOMASS/data/Pima.tr.rda0000644000176000001440000000565711754562034014004 0ustar ripleyusers t?@BY J潗 #[#H @R(G-TVv9ֺcb KYB"Xqmk}oľs.3]Lޛ_:&4q0',Ba.ӻ#K.)K]8(-=l3=sw c;k1=teaj鄳&1z.XXCteɳҕQDYxŏča}߅Xm_xlnf)ğiś+Xđ}Ѯ8g1ķ>3§j ).#%c؏0B:idZ򌖛iek]Z-jײD-ir+|GӬӲXR-jUZвPORf-/F-?oUK-1 F}8ܦe Z6i`s{T0>~r OQ0YMk|up1Gte9|{*r1\_Gj[gka>֍w?:RZ^ij-7r8|֧ϯeXs W3>@-Y"Ob,BZa\6*A<ҷE?˲r-$ͷ>bLF8,Χtr->rYʺԡز~ P c+3nXA5)tB.Ɨpb*K- R?Cp%,b,faq5E`JFʻїsr-#jI2@ -\ -C$isMcً~K1x?1JHlZlX1'kx/SPi`?l7 >Osz"b7/KnCtF+8`ًws6i x{XSx%|#={ 5199g3R&|qئJiV8px9u0G{> b]z-}I;|W_f#||?ֳ*g3a_+`~.~|%.S< ˾q?q<( xc[(}-^ 
'F?vO<Oxo_pa:?Y/@OxK燐g&}/y/R<'xf7A_D%ǛuN~391jJ:D>lֳ9--I~Rq2C>z/A>с9wr o `^ʃ|ɽk;q۟/7oV[{oE[m6TB7z+VMwg[6N\>y]p_Ru_}jUF=+Tg]}iX/O[\>D޿6}Ի3#uh\ߦ]}l}աEfmxksƩ[T}rcϴNW-x}.WLjsãM_m{qd ~ Ә=LImtzvP7XNN~9zBí4cqInuת}+vY[zojXկ O/9j'|Jԡ a8\5?AՊ?uԠ~Zms R!iSmt)g[.mYP1KiPⶸ4MR;ɣy@ 8M.MWϸy!-,DݎS]];zp:ҵD86:9%^֍wmVmnQ^&z'L>ti>re/4N )j|4Ug6x?L><rI3y[V~U-9tPzm8۸{uq'Aǜ+m4ilR{(MWп[xmyꐡM6D7,8rԨsSjg{ sԔoT87U_SCw:}>r]PM:Nt;'[O?ɽQGS|O5zmPu{vܻ-LG8m7sz:}#>j~iO {:=q?[+yst&Mݼ#bޣ;y{ށ̻yv|sy6>78VmkI^ҥ6q8ulK' k7aB^;? .U2A)JaH>JTH_nzc}_bE#xMASS/data/Insurance.rda0000644000176000001440000000110011754562034014374 0ustar ripleyusers kQf$~*Uk lA(G<.즊x x7/zúEr̛y{jvZps,#˫l= Qlyf4xcR_UL;qMj:uViXd.%af8Upm[5&_?>_~ϵ['iji՚Zޢ\uM NƝ-[n*?ShW-׍7FSo -l|fȫS+3)A/9x (V2+Ĉ( |}/z7)8'+i#0\DN,ҤnqEpeTC_<<-<|C Wu5ITNi-Ot.خ{`|5Tg4źWgT51SEoB.{_QO~'C+z0ECt~[ ޺Aµ[JZtaX리B5[`T\̍fMASS/data/farms.rda0000644000176000001440000000046311754562034013570 0ustar ripleyusers j0 )40(Q~@3.a#Ы1Hp'4VdYYn;϶(PV$- dL>wx"?Φ%(PŤ)z GUqjjj"Kfwl-aRҒ(0C}W 69.DE'Jx/L]ThTħ!%378g|j̾_.0~*_oalfV[*5cSTpJY"MASS/data/epil.rda0000644000176000001440000000316411754562034013412 0ustar ripleyusers ilTUӡ--e+eEF" oRJvPI[X5jKjh"&nCyB,%PjkQF( I!R}ޛ$LsיrN+xa](E)kgmƈ|t^Jf*Tl"s qu d}Y:}A-XElz  ߜ.~ u X yH$X1 ҎÀU('2b1&m ~ vq0-nedld3CiɩtMYM1U6g\ =ov){,rޚ1G?c/J׊y}B .Pږ&۞-),ү:ͽpOX <@b>@*1YH y pF p1?ڻ@{d`Ǘ/Cv ~W#=ȀO}cy'{ 14b\1h}\a{h7,燜Ə):Mg}c͍q*Q_55uڸb˟$/c?W?;neR.>\潕Z"uT&قg&7\sg՚i% U~y潴WS=bRڡjG3s}R~w{I?oT]J'YI]riSH;0MItC&J|PYWe{ Kv'T;i)-ϵύOf|g2>6RmW¬2[k,++HڽRֿ)e/>7~yzȑZ廥 ov1s]٠㗋3~yΕoarR9p3/kK}Z#״B\XJJĥ5W}yNr/Y-oE~LxK=)*\D(3+pWF_JWG\~_ϝWQ^JO*MASS/data/GAGurine.rda0000644000176000001440000000312511754562034014117 0ustar ripleyusers _lSeϺ1ʠ]X׵]韭 _DžKϸs~Xߐt3p0_f#~J?S'&{K^&e_^ %1 ^*uzkԹFkN>G!uoP&unRzvo۾y{;z>叽˽C;K_O yA'bBw{99F71{2uq!Z>^ﵚ~q ouvO[mtVa:0zyFo/ 0|yvi_Kuԏo~QD>z1hХ{].?Nq/NS/ M .nH|{5r>IIIOR7iKҿ)H'~'RO>ه>_g9' 4~i oAژYgt0sf20)'=tu!ͦL?YHOMߵ[#6co Ng9ԝu&a=I ` g7ýfþg~: cf~{{oKge#S6؛9x;]}'ί1uչ@&IW՝>)ӧS%~^U,}+ċFV/cߊ?}_귌NIY~MzIw#~uŃ۬QNiQ#n[pq!8~Eէ4j4 vPֈޖ?>c甇gCP{G[嬾TO 
YxyνsFw/?Ov{X14ԷS!4oBۇ )y!aKg4'OZ: _ _Gs}:/='Yz*^u'=dOO~[g=yԟtkKc姽~Vݜ{yؓĩoXqʣk.V=+9 )xQ*ΓrY}c?.E݇;,ab年kH70_;q- fU_sCLмl{i~\[C^xeBZbeF6=|7+٬r'rDvMASS/data/abbey.rda0000644000176000001440000000020511754562034013534 0ustar ripleyusers r0b```b`dab H020piĤJ # r:H1L ӲPy(h(Fki`P6PJ;Bx(|†,MASS/data/Skye.rda0000644000176000001440000000035611754562034013374 0ustar ripleyusers ]K AAsEf$wޤl<6^O.Ùnf=8&"LZ DcfoCI0 (;T|?p/ ss9|XtE_XA+A/Tcg]s[ $]EE_mX.fX7e0rC cv3sR,50¶O0gbr%Ķy؀}g3^#.H2m8/fh*XG;0h}v;4)>yl|Lrsy>Lb\Оvu:3t*Y L8x$$^7{:xNyۨ4 ~>U8O*=L}aTģj㜱EM=FDm9 jŬ kTĤ//T3Tĸ}PUC= mXsF (# rB C=WnqfnrQW,1>ZNp ( , *z`"羪o^7)_˜Vs h'EU_oOF{ ?qZIhx]4]Yi!'Ce=tj1A4E:Czw~8|,crKקk;&>j*vsdغi3'\EM9DkwbU eAߊEK7{c-.ػfK߉}S7p/%wZ ?^6WRo^xs{܋b[Pڱ_>-L*Z)lU|֔w1)uG/֓y;tLw;N EZB,ie^kf{},,$$C-=&LY%#>2iea|5#;SNJq;)Ckgܾ'#sI@'ąޒ;'gWhy+xPg9}b_Z/3w\'0 CSn22;gɰ:>nvEFSBd/m>fSj9r~[2Jqt闫+u-]9䧺vۚ܋NKc;AĀ}:>7bҀ=4F9@&d)G m^3z@=2lGKf3 /H?2Tπt[~oݠ¬zlgQM䊖luⲹfHA~K11 ?7Q[`6$&q800&[;)}|>Œ/T5$;$+ 8~2ܥغ*8V9BMASS/data/minn38.rda0000644000176000001440000000120011754562034013562 0ustar ripleyusers KkQL2mB!JB-b yHZ.||.\)ƕ tЅĵ+ߙ&`jP&9s=;קK~W,YGd0{_uV<%ن1#ML¤\W:.&AVZFDo4<|ހ^1 [=MkLֶv_%+\¬sV~[ӑ }#uf;s,&"i2GT$k߶OK_3Vu~\Z?%0v159dUK\ڶc-bjXky|aLQ3P_K^`8Ĺ 3c{< ~'f=겼cS0J{#\&9m;^"'Z0[y_\rkq3f9HV쫮c3Ч0~=yB+OQ?E>59xk9/SLrC9͸ܻ`9~*s3>ĶCDžw6b\XާRPe2ߨ/Y;Z;ZG^MASS/data/Sitka89.rda0000644000176000001440000000341211754562034013711 0ustar ripleyusers KTUy 3}>1v >@=*:t0Q3ƈY-Y֭Y-U@LL%y~>ύ'IAj~@/_=os' xOvnȗ(g}P|yM碅ݵ$sKOc߼'^M^@rNgku=^$J_M\sSX^Q琺yn΅n/>\X#N~Vw9=up;廯KSɿj|48>ָYoĆ<MASS/data/shoes.rda0000644000176000001440000000023311754562034013574 0ustar ripleyusers r0b```b`dab H020pib6a"> 堕 PZ uЙ TRPZ TPZ BՁɫA@iu$VJPZ = 3`AFG HaHX3MASS/data/oats.rda0000644000176000001440000000064411754562034013427 0ustar ripleyusers J@7҈P=B)HFP*KBh("*zSMXM@KegvggwKRͪx0 X>'@D]Ei]@[DOLJ0{}GY"dfme=tT2c'g\_tU^² FAk8W^c*S%wuP:eZAK=sNKrCW9~WCڥ'd*hcR[yaJ܃w z@|gO86 x`/69?*h<м];W4i=9ޤ#ڋi,<B{kҹd`|mO];Jm%+m_0"V#D0XjeL m+MASS/data/fgl.rda0000644000176000001440000001035611754562034013232 
0ustar ripleyusers kegwb {Yh\-FA1hXEcAo1&&h"dAh˭ nK)Tv۽u:sivJ[<=3ygwN]U ڃEE?mAlivl~c+үYhvcK윸9w7R<i3 AϹ;=v, 't/[϶ٞYؒq\kG›>Xwt'Ǩ3{Qeq}GE6|\І[.)>yd),oɌak>(v>'o_N[GSouh)Sl |p"-i=.ƒf7_?">1;^i|E;>;-)!esy_̓<| wf<7z%gdq|!ŕ/9 ɾ"u3yQ\</jh*D;[BUӚ`rS*dx'~Un*omJQ뵭fZo+ai|U[ztܕDsrS*li*?ZUOeEHT%^Ri~!c_x/ڧ O:cدXWjK7O<^^⭊sWZg~e`_Sܯ"j/.㇕'Yc#C\Gy]Uҷx_|9v]ȯt >]ޤ%^G:[M:^-x#^^uz_8IQ>_u:WKXg^Vs~dN\u >W*+o+9KyJ::`.୎?:: ' /¡z=_8Q'/]v/a~րg5at:jUG/u0y@o^{|]ǟ1<|7W/8t}u#yw3^mn8{/翛8{q*~/}+(~pോ]'< wGK/Z/KvO!~5.ͷ7%xˣFPxu]G;ooy.åD}O?:W>y\8|x 'ӏթp)_҃*RCW|Ns:U^+=C!7psrƇG KsxWZp6oxмY /T\m3[qM==oCMx784]8ټ`x.[(S~¥:;Cp~i{It9^eot1?du~!Z / sĮƍű_uy_Ku|Nƾ3?n_x.̏㤃)Y~mw|_;t)x!+ע/v%g94z/׋YpNţ>ŇP}x]G̀_J z<=u9g\uÎ_yC1Suҍy=p<ޢ%oF^>4‹8#9Czz |#j^@~=9 c-z;N[8gɓt;O.[QcpkNMo"'~~}8ck1qlY>xxֶx6{>_9~棘{=o`⺊qoqְB6Z>5f86X÷^6bO­<'h|q#?}.a/V;o$o3FJx}=O(~=[ΐ>uT뛰9<%ع<g|:}3x66a U|Ⱦt(]l3A<%=OK "ry8~\\z:YK*O߹عN3]cTuxӼ%??,`=ij|+.oȓz_Ffb+EKGuF?4J}:0y|\]_k[@>5~Լn?Dst-ܿ}y7Kxٓ{n?uǧxs.K|Wu% i^p݅/Zz=uQGWru^e\/{S9#;4 ]::?>y'1[Mw3E։AyTW-S9$;<.gr%e+%.N,3NK)޸Ry EkF .c(oYW[⹺B{{A_i.x+We!~yq < )Ay+ty < w _oe/st|wPҙYyOuyҝmS&p\|s4w?Ŀ = ŭyFO4$Ϸor~}EσM&y-.q}DwݷN:cOѺߍS̿~q:pӧݏNShG=w'~?6uG>ì3RV#:qZ_~UoZ+}z,~3)ݷmx]\^t}+Nv܋ͯ{4SHn_C KُVxOG_/Ş'r9/[s~|z{.7ߎnX|xM ڿ7W|oZo:0ud͆S<9Kok37PpwoAMASS/data/eagles.rda0000644000176000001440000000035411754562034013717 0ustar ripleyusers R 0ܤlQAP@"xjR[h/W7$ 6bav:]!Ar\@`8p=D1@L?aMScxRyii,$93*$*/=ڠhkl^Yi;,{YcE1xs'|͵o @_++([Lyj`wN}MASS/data/ships.rda0000644000176000001440000000062311754562034013604 0ustar ripleyusers ͓1K@_.IkbJQ,A!)jSP餋_IIpTp{wfhG=߽ww/wjc+ a7!- e`FDUX X3 q5ZhQѢa>ܶ J7B7ǔ#8?:+q?Z6NT',oD'D;u/|`= *X<ȣ>*+)ּ"76{jN 婛4X`~_8K|ni3xh9ws/0{N:B?h(yawu`~đ:Gm:Sj80D& .K:yZe+oS`&)ܨMASS/data/crabs.rda0000644000176000001440000000424711754562034013556 0ustar ripleyusers [r2&! 
!1X.uedsHqLQ̣VURg#d;pFu1=[Rqӧ9}<|јjLP-_*4T8jA~Y f8ј: =G%1ά{tx,^<:O$ G:|Nðowkrl5~Z2Nd熜eqROVkxu)H]/cXJPkex7˸P[e\,2)Re\.J3e̖1W|WX(Ze,\J˸Qj7h{ M5:.?rtW@>~U]k]~;g|@旘__a~x7:n_}yV]]Mɺ5&iu/A[י~9WG8]Bg} yw<'t '_y[׮++cjg {|ݵJze~8Ͼԏ{O2|%1$~c~&1 ]guBźӼ ~77&Gf ՗zbЯuczF?- h^q>a[CvY<9֑vy2bu{x \B40͘1׬8dsckTZ i]c}_\6wmsN۬ۆ}.ze=UUsupJ^ׅaο^.>uٯt{ǎ{gk'r;\҂ź5ƞ#з ȹ--B-::mַAwWmb>/~P"޻7j/pQЍzRAns8ؑ_A]~t{q>Ǻ?Y}7&43L].5W?gճ<1ώ8gu'.{i<}=G;rZo H q ==d4>{ ~yvh{F>rzmyS8ow d3)Bn؇m["#oy9m(n\UOF~Ǐ+xćy"^O73>{¿zP GݭӰ\oo+QD>gڟT5q>Uc}g2/> {J'-)GYz£|sh ;>k}i_]k;&;o{ݐ'Q~vgه~tt" Odo s"X;r ]|)^оRFГq=@MG1yzwIYM<(3ϝc};CiWK/*=!L7j'vS=VA"h"~|i~_P Sڍ  . 5[чSAxO%]k~>?cN&N o ? uGq3<w{}ͫGuZSWGM}h\I3ؼU;mNfqP_oޑ2@MASS/data/petrol.rda0000644000176000001440000000104211754562034013757 0ustar ripleyusers AhAgw*zB ""1)D1VDD KM𐺒=x͋ԓ'W.ԋxx޼JɄtfvٙA\-Rr3xzG,^RxRu\ c1b8j%>l_ԛm1HZbjbB1bs[̢;ּ#kͨ-:7N܂' ݒan\jx- &6PiR6}H8-nio4zqXg\[Os}Vx[oP+6a+>E~? ~[18ep\[w?lÙ1xk`c^^\y]ϣia[kyp\LQKl8l/^]նJnvq&w~Mf>Q`s/1^I ^+Ϣ\+>^,\Q}:oivF19$o]Q'Q'*6Zƚ:ۊ_e$Gzyg{ D&4MASS/data/wtloss.rda0000644000176000001440000000067511754562034014020 0ustar ripleyusers ]+Caw?LېfJI-zew[t?VkJR"4.RJJQJPKrv#߳癲>{޽.uyb!]؝:pbэtLFGsJz4"j|@$ h` ) [ ; pGpN  %'AkH>^]Jsݴ4|D}FZJ|H픩"i]ؓzs^^.VKr_o2H?T\ҷwR(_P]'Vx>Ӝ~ZJ4׮8?xNZXJ(l/WΚ95kX3wӓS6=26zR*v*w/'YN/UX~ i,HMASS/data/npk.rda0000644000176000001440000000052211754562034013244 0ustar ripleyusers RKN0uHJl TBP`%&M$TӠ4(D,Cy3~ct܏1c3 8Lcx0y`~ [[XZ%!Rc怺л>CG X5B9UX *wjv#ewTI.5'ZӶI&F+mU$oL1>8C|eֹ0kȻA提 o%W19'Xr75o/vxbJsA@JRM~QUx"W5MASS/data/Melanoma.rda0000644000176000001440000000343111754562034014207 0ustar ripleyusers [lTUwg:Kg)mH) L\Z WA T3A@[H|Qy` ƨb c jT4&D1PRhturf眙iI^{̙gևR_/.=T46*ٔh֜P[DI`ՠ'yg{k f %*c,,$ >_opU)_>w *UP cL-d;h%*Z W ?b?rtg.8_iq>?翂T@E LsA=nrxJ|o@gMmQ\Gj'<t΀*|G1+D¨8xW5-`0|TFP|/1`]5M^ǔ*™OWbC1PD+VGQ%)yA5zK{K2s T/MhMfL3n)1V i1ֵ8{9.Bm_swZD.7 >20v0V:].=6~a@|8i 5l:8aN^?gebPcn0 :cF qgu=mbW^5ーg1~*d4`QvZ_jcWhW7lȧ0CK(e3Rz/%>y\/ꏠ*_g%^+2.:}sswy =eYX0?yRvs{aWA?zS_ןCZɷ_o^%~ϧכ^Z 
J?%vZz=Xyr0^>#nNJW-b/:FoTG^7_IjyeZhu*qt]nsќu׷u>tֹս"%}fVw-|,:od\}[Uo#ueW.ֺ^3dn;^N6~o6n3)+׳g]i<N6vi$m*9߾9낶D6m'2?L:ؾyK֖d,ljLnm6Dք<<غZQj]o W_iMASS/data/snails.rda0000644000176000001440000000055611754562034013754 0ustar ripleyusers VN0 v2`{ ÎD%ڵj;ȣqki'!1؎Ė#'t~}0Ȥhr0ƞN˸`I~"sq0*M``yzxdM+vKJ.={哇D_FJw_-)q<^WuއOE1~/ŏ}!~Cv:: [,?⿔yz@~BC="ُ_;]%`oW= J񈐫x?x%ҏj/v$;E .2.b]wS#@T}b*D{MEMP*/Ƽ?{: ' e⠑;f_W38{80S;< >e)虩ϲ_IOUy@>{ԣ'3)ѩΈ.O]}m=}mGT|';2bO]xo𽅼cǿMkOQu/~+w:٥:i)ԇGAdT?{9_yiu{Q|P=o} GWGקSVfFS}١{C~/UaE~Թ^9S ꃲR:gೄ?o[r^ Oo꿊#~~?woOw~f]u{`T⧸V_?o 'W\s}Wo*=IY眯{zgٺ/L%ݛ٭|f]uGyB)C{t8n8?=wt5]MWt5]MWt5]MW i_x8`y  ,X60hhh "@j 33700b1 0p b&MASS/data/michelson.rda0000644000176000001440000000070611754562034014441 0ustar ripleyusers +OA3BjDCKy$Tlm )GPFhPBs3AX&ΙٽvRZ42qK)5Ř=o^W)-"/Tx;DBL K\cF-0g1U-晿 ?N47ܫدXUΫ1XS{/-߈K]SJ{/X 6}rξHfU T M4`Ae:1'z.2[dEي+*VZfźVlRҲS%N8Uqjթ5֝p(/!ȧK`7Slz+]o$2uDc4zuIz!UAؼaq=x]2йYXM,B8}bv:b5:~ƿD\Y~whW{pUFx' ;)A#D9$2q?QN@bmϐ_3Ht?8sX_5K*䋴34YM$,F,zG`>Ƀv%;Nf|muV`]vBys'6ti1c*9 >H.q]d %vxw2E#kUVҔT.@xz;P)~Ϋ~AA2YR=U +'n:Q]M[vgvpybMqwX 7#a45#_D2SCV[к+]G27A9WҪ:pRCb 鴷0j m߻ڹȻ:koR`i4,{? t̚[ uiDT_=!#*FT[7oe4 Uf>geHB1j[qC92a f|A jf QuEEy?V(Ľ@uхJ0K#(]\anCm))kCS`~Mtj(ˊ4X HLڭm5VWu;I_܅u{n.IE@ɿ䝀Hv[-'~6w%oi#DL2n%nB2Ƅic`h^584SS0 zY)C]Z`FIbYASpu8K6̘E/IG쵺O .#$/=U󲂇>C lQEKbjVsxpL=W' Qs2hGQ[_+.M'u*M3{]kVc8ӕf ܴ""С 8I! 
O|E][&-z4Y ZgiC~vH*(@ٽlXPSC5G\ 7Îy!x*K)qredJV1~򆫎nP?aas#0DղO2ggrk$B=#uy,cX> LQݞ;7۲(?LC=׳6'gq^l %(_;| G*nbyWǀv:[% LI<(jw =XM?X>cAE& 0riHWٜ"]:[ ج 1kX_,BOQF!uDDdy>Mc?}cO#"v.|(5_]]`:]7;w5_PC6(;fAeFi@vQGV5$ O?1fjsb* oM_gC# k05:,nSN4Vxӡ$Q8w_5 NiVmyDž>5ݰ'0ژh|>zI0jae9%MϮk0 yFwqKBbI[u/WԻ*Ez6Y Xg?3!À1ݢV bp*㏓iC{œCC5iנk.7<͂hAI ]uyKG=__ =;o \o8vOn4w`g*gu~F}j ೉hl90n R.>)așdJ45sLnRf"=Fb:P̈́`mq֬4@Dv'#mn^I`jGnu[30ڠDKFnuA1?Jz~34?ѹг#.g`\?PjGVdx> ZW'Bd~hIw:D@hs̗]t _ 0gκAԦ@NX!GM :M>܆b ~{HmvEQv4,A^" "1z@fyt/ Kqu@-L|<_UP3eSxJ'?4\ &hxQ+6* T*_Bz: Od0R%LkLvs\= PPaVG@ȟ nɠ3!T`UZܻTAu{Sq~u3GB@cto2V3ا3LI8K=ߨž2LrULv ЋjcɌbg}Wo,PͶnb2UglE;-27J|Wz&m04u#.ߛf[Nv976nK{е\Rt0 Apc-sK6?x@f Bm._~}MWVڧGwɿS.>~N<]ی/-x}0 ܠ)l&cia9sB+`q@@vUKEsVTDgbT{SVϴ&>h;I9c92?&B#C]KaxۍX1qS-\~l޾#l$p_j,,^f>Hqkk ==ز]cv]\ ֈ8|`^9-/FU$vc0d˲z=ݮ[g:lc _0g) CɲJ!"8A<*f.XgvEQ@ hA%}/ǁl{8"~ <~x/ik^}fd-e|dkY]C&t6@ߋJS2F .PC q\%ݰv-yP&U`ׄ\lKa˺ْ\ȯ;VÏaa#e?M%gb[\=aAMAO 2Y-ю%f Mˉ)z-g)Svg5ְy? o8ƕ; c]9ӭkcsB 'X;(h8kۗc|z6 ծ9  v_dΎ\VC}1ҌTˀFCB~~<&du-jü3牠bZo6_eia/UՋN2`L^&PIMeA3^:Us̱qC{]lH%zqdyLpQ (?d 86fKհ B~"-+bMf6]Ծ\grrhz"\Cz貱-Ztz\[0_렛GӮf`bS GGk#mȸxႮ\(5|쩖ǀ4W[4kCkT~߉b_|`kf7:.0XP9.ڱO_ۮo`}uC&O> NvH ZïXM̅H9jX,B!1u]#&Kb_ۏmFMZm̈́oc27 QJj;!x7QЯ83:]WP@,5܏I~<¡m;xեڇ,,6kBC_u0AR> #qJsVf<u-4†^,,90r=YLWh,:5ynm&IiEϘeμ:̨'8gLJ739*0l8x,`ͪjN 52v6dFn/~2?U<9'wqM%(|TΥř#gtE.ŭ͚6Őbh%E7xQ*!ZJaj tRKod,Aae4EDco F%6h;rǏ{c(e6ѷn?ŀ>w)ߞAvc7&0JAqEcy$XBDŸ;\B{Qq+.Tz&RnvZM"Sjlp# E ?јg'LFڒ?OXi^d~k5 5 r2Y[,+lĠug=z|Pu ~`/cuDooeVSD <'-;%M[iS`\r~U?F#Y-`cN*\̕fΝ}ҌBtPk/G|ؘ.>s.L-4CoL)aqvR )=Pda[\JM,9)E"(أݮ^n/鬘]_?~m7Fe),Ĝd0eo0{C#{y{4'oGYd,Š"M)o>aΤ8tχ,ćUF+01uл;J_)rNG c[uswTuy vrѭK0ī l66@OnQa7.81Uh {Z\{+ ٵsXظ4ș wΰ4kO4)_O)pذzS$o=2M3n3e`OI2ל㇭h!5 dؚjCWTm:ᎾXTO~V?I~miŔl\f4zJ M l4X%Rd/ z(=Ct*L__O/>j%r8m` B)3T\w%ɓe?0QP| _~wƟlD#\dCwÐ^-tV}VZ 7:3<]gs@vGD9(9| >WfS\)!h—?smfCWzKfeX`T=]$;pLrQq>xF|.א= X~>bj)TH4e6 I*]7lNWqˈ3r"lP!8сMnRSiHZփ_|i'y!387Bqݱ "v BEFA4Dpǎ1 ϼ50b@X)m7S?< ^ ?e4.e*};:s|ȵZBAg?*?{։;)Y}=o9BA<Kxpc%G/tXQ_? 
4V-  w&Fr~LPd1t(ߺ>p݂T6a/:~^eπ`^Եi=~x&IH /l"`yfJYFO91ĸm&Qb'@Ick@?AG?6켥u# ΋sמAiQϯ/jdLmbИN;=>yQrźE| _6P ,L. q,^sOOj1~m3C4M}_HC2W$P5Ыg;wAʵ oh84>̀qSEp)nNu!j٨br}AzP;.sٛ_5~T0ϻmb`HrqUhˀ $G5Y;0eb$ -҃ *xi1ke_s!\Xktߝ%O7Vt`V\|XÞ;__BakEϑk0GPT)֊c~͋g:B}v h,, `GZ\wlgw.U_l6566޲#7&p# v$&8͜ -2Xx|<E;{, ;"#5#=2-'q&4茛JR<)^ 1Ht/Y|s5"bF{:.%{c\1,ZVr<#X8Ox )UڝWKs1[YSՑ [8\ho52dw4C7w_bKza7ιhORؕ}iLp;F] |]p-n,U֘zk8)zs9j铋4M3}HA~>\=uCpVwxMN5%QDg]KP5.ѓc`BW7qY+H4^ڦ9g>E00ONnBRhmаbCBr2eeAuBq$O?Ή?ȅ_|Ry)]S˩B>M{t atNWԲEP }qw<_' xK,B˦L,[)HCrfX>t gCHw(o8ECE,]tԚW lu@?H64 ?RPlWm!Ud&C7Dcrnll ~l8{мH> \ bqSD^d#/ \WQz8G_ؓ5LA肋_E]Pb|E=&f??&ª+HsKYMKRw$fU s*f$\e=j(!K%ms \ꩇ ==c8U3(mqLݻY Y u}CɇM*|3KwAX䖡2hJSj&MI8.Fq(P[6U,ͳ'lWIoD -D?5L[P\N׉v1p]9}2!HF%6S2 DQfBRߴ+HbTDD%~sѺ򿿿ZDJMu49>笡5;א:BaV.ʈAy.s63&4Yߥ_t|оy²&KNǐgms8߼)L-lj= )m4Bs1+Kop FUڵA-7=#IJŅ׏!j~nuh:]#׼E3kYء)t7Q JJ%H}ڷÏ;U\AVjT܄e%{d*6ەXe;;|]W=g|'s6gI r~|!"Qw9MR3H]%v΅ĥ)*gPY`Zf*Kė{rdƀ?;xb37d5p0IϤ" ů,EC.>-9DQ"]OsMMLz&{:N.S#|eUcEP"!Ik8dov,ec|С.кرˈ ޞ(,c+,-1/e1m1$G 1 @ߍ I!HR3W9VH֏c!egukT-2'JrD02N9lM yƷp!džO,?(@o56?r?n!Vjj(?E|xIȷ4b`і+Ρ?O9P#lHA5-%E=m:ɝKޒP,19(;6An^\|ʖ0tRK4 vkU z[rpCu&}jnu6_qgkRZ"7;)ל﯊t2kc0O2 zXxD7"7\\Ljgf7뮽3_Fn[?ZnʞSczJ;l7  IzJ.K½i&=W8>d+5\I|OI|ԥ`)dXd ]XݲM8ǣ([ G&Xޤ/K~!uk(T O:/ 4ݯ8~&͛ gcJs\L xx,l)jNsYl~E I ?iLxF'6knE3лxys;уMGؗ|q! F2 Aަ{<4P61~"W9oy;_ļA^A`{ oz *{/MFm1[uhAAlI1 }E;0w[r"G7@++h)? A6/+h?ۏ >xMn7V:hclpYt/s`% )lҸ=NSKfۺ$% <&"gN&LJfyDf`5#)J/":Ì,(rڸ+N=si׌80~˼i=< Z~YpB{`l 뽫1\ [ fXC]?dmgVU-M-=g2sه!G1,zW+Yi, uts#ٗv6[Dcjxt[PB_jh:dem1߰s IUcTk5ӯ ItU U^(7HqY199s&o|U qm+3<).<*NwӴT':!. wy1Ygs:!^z*,a{WԜancC#6};S#X:b3q Ղ^\,(`fYm]3)𾴻Jι da1I!Њk6AQ}5 ZfaZ휼$%OljpbϝߢqLU_q 9*?<,;13X󞂪-fL&IAbWU!ĺ{wshNyaA0q"(_'>8J(+@sQJA ~s\hO8jwOf [0 1;fV'nfc\D}; LɃ;ҦJl,~z ?#%kAB.F)NAǟ\`̩׆SZi( Z/_Y;EqwWw:v-b@ؕH!4mxpdZr wi̴妚Oy+¦&:BX_r< afʄ{Q^eR2!fxj8zʀٯY,Kq!,ƺuʑ V9.;|KrVI.-艓Bf_B//hcT`\UTt+sr"W&}[o%v %5 *R*{! 
XMȇ[]v=PwY: Wxʀ#(4"9Vb,|Xp>TkiINC{۴5Yo^TH5 j iȤ,Ur#KrBJ),LrY;D86]"Q}ɝ'dpD4iEo(]#cQyk Og.0%toqkaQ6Sy}}^ ֵ|@rM${;?}X_ZoI*^.tКưEUJXP{UK 3l9d`C^XX⭱џ7,Aӡoa meM^,l޸z_f`b%֘^.x4l xkGTg*_oFϕ}+Zjocog8=O|1?V*B]Oˆ:I>l}Kąkd>&gCn% ~ snMݏpЇ{ɯ.]Is1h }hPbOͮ-->,Bz. %NTfESgile]rƞYsSz{8YpE˛&,hP:ǖ )_YBu|*F}:]BfaWk"Ī/&qnv6fQ=]S̻'VKG}BYLQǶ-`v/f |ln5d ew'%& TU2w`" 7|:?_HxJs:ɳ&CFovOb{5֍KĀ]M݄/#R 6ɀD0T1 g?Y9 4јwzό=; [g`秜 25?EB!_u"e`%% {J&'f_Ëho aA;ƸŪBX}^FSXyl~v|LlFEHD ;PwZmd?V. Y5 צ (x6VAۣ{|0m+}cٰMkVGc?yn/; \H21*}Y|)°B@>ǭ Q1! {8]S=>=Y:77l5;j.ޥ w( \ھrm]CK=)b+s*a>b&ׄl/[^720ʛ?h2t9΄GM3'Qi9{;V邵,i{YlZ5|[WLzg$xhnFm$_Ug7; DaM/X,Sº׼PP>=e0tT]aa2 UeXP@?AGP_: z?ԻYqF&Н۟kn0:׷,dm>.hi~g PY ;ESmx;Y_#+g T'ܡـnwC*{ eʕ|eÄ):Cek܋"7LsiIj׆>]N Z92om%Lhruɋv*:qWÒrWSwV8UaI{j 7K?Ы"t5 C4]28~o/^6V?ϥYRk64n+NtjC UYP!Ol޻h,em" n5&:[2!ciL.ʑd/>mq˒V>o3*r0 L#\\f'H.\wo4Ȕ\.|N|d=^U*%:~sl-W8We}W)k6|{7pKDSvxR8SyIP2v~[YX_VgM_3B[g<3d*;rU>4x;u kK L6#w~Y] _^ |Ek)(}m%' tz6ѐccɬk~灲v{x-wH.P%U.ЖjEAfP;E\js^I|baܺ/a^;4n5LatF.R6QF#&oVŚѐqaҷ]G\E;"VNEHW%[]pb} :XLl^5>NZ1*ZK*Fll 4jcR}?X@të:/g:-$<8Npԋ#~TUxGAx䑋3_]GgU9:Ͷ9ه $ }}~9 \=L"u﹆j/.Cn @aʼnn's+GC_Y̋wL:0Nan(^a{[5,aiW]b8DNJ1@pj}<3T/xPn>j+PR0~`yf'?) 쌇C>̱``3}.FKXg.DrDwYAC]ٛk4HiAV_m0v~Ȃ vO+_пY:{GYNk7IE,|Kʼ %|M@UL9|RE¹^\Hr~aMvǥnP2!1-,}w4 RZH;j֦E.DcuN{7X[&,Z/_Jq*r8߄$Ҋfp8 C/gٚ_\H ..ap:"VFؗb8W-lT8q" Q}%TVno cz8kEpelJ!ľаKܟăoS^W0@vPgSR,fdtiUmsE`i ~?d5avR*Y 2BZ`xez[9_Q^I2laJ:`k۴|pH-{_zXÀ/S lg=dYQڒ6t>m!AZw_iҔ&N(dwC h/K@di{bb۶KAEHPy#.89F/`Y날lh@1\<釖*OOI{l5ɂVk IEC]3SކǂPxL&e/9ؙ7ݚeu88|l=OލQzm`&CtFUT(0zH1h[*+ ­2{?>tC{EĨw0V>=wlq#UzH͸N`Uؙ0yUFAlŞ0bUD_W;W~O_xOȅ'),ٵSjjEݱ{-3K$ u|yqZe55F"'\  nE^OI4EGf>@\:^AaFqiW07-;񄖬SoGhYc_3B~c%z !>a#g̰zb]~4P`SbO`ŝ^. 
ؒAN 8hExBlԣ= eWш +#Ph+DO%;%,+4-bQq a{h~fE!`HmG[w"3#[hW sؔdM@ߚ6i>΄} Rt U‘v+闥`-6Elʒ~yݪ8l賬vZTD8#F[ql [0; ɯeߌi~X,xGgf߻E:[W0iOל~Hq9齊H.rز]' J&k_vv l'z:dl$ #¦fme:KkAS{m.Ef{)?dcl=sCF) /fb!+y(xAyyPGY/ǁn]v=_o@a+'Ъ~gJcbv AD/kmBfo]- I=W2=dBk ²kow0P:f=[N$l,l\h76S|6`xOXY'(+ KCrw3FSqe]XpW!!M 4p5W,d {6yBh.J@vVft79.udZWr7\]B]4#Cg,c K&vXB/~[)6f^]Gԑ7Ml7K~F)-(񋟛F=Qg@^$j}|?ګ=2k>>rz]߸~.A yVd AS# ,WMASS/data/bacteria.rda0000644000176000001440000000135211754562034014230 0ustar ripleyusers ͘[W@7ZDx+ P_}j- =D|tCv$ds~?d3LJe-BILZ\&1i7?'Z_9cX(.sy\S.y",]Wyǔ뉎sY%?ׇJ/3;7NSIT8IO)14HC>ܣ6ru#~Uk]D/_*@^8~cj& O#4"vu͖JyyTlU*wg:@(leg<*L/I$ɊHv}:r4kYecѶjXO|t\ttVCW淦|7l#Dh% \AXE*p pM[ E'q_CG% Mt#*=-Ht9}ǩ0ٞqy/Q$dط9dߌFg4Nc"\/X5NMASS/data/nlschools.rda0000644000176000001440000002055511754562034014470 0ustar ripleyusers {q[M X`Iv"vYJlNvz>.@foo}V)2}^*󃷾{W46ۥ|tX 8jt9y0l ڴx7>Ɯ~̵Or$p7}ףogXvcBw|>kދ{r?o-zfbk5jG^̳sa;AJ\cg1N\4 Xo'ڟĸݘq~еcc̖e'x8gv}|؉7m6hx"k Z7+g2bۊGXZ}}ӬAgc1nT܎G[17k`aFc͘k=h=ARj KՏscݸ_-UFfy+hMRT}Ru_n~<{/A7> ?qQu'냸 KRZsX jM1|ݐ"KooZg u- D$?ثhS\wJкk9<ߍҾkuJ@/G{/ ^s'RVӎ;O"c5εx.>ZwT7twbnT=fN^mjJ۱N`}ݏ9wz&}RI99#ޏ; @GKu ER|T߈ww3V!~eG ߈GRs}nX?~Q{<~< |3Wn*UOr-զte 'ƹR}b~fKѿtϮ “'.d_RsN~a4f#&G;aA0ft YלqIT4q]3izYj֓+v#܊qRc]Hd}5akS[3YƅRnAA91e/ ܬ(|&G{K; ]K “fK;OJebK J9R\rܟ?qTj^us_Y\ُ#,cx\bkC ]'T쪝" N{TE@̃NVO7b?z??yK >CD [-{џx\IXd W4Ue{IRBYP{"Ff|\=!6. OzǥJ5lzJs\jO:oΩ޳Fb&t%Y3^h ;!vY >~V>_J9#5o ?`dϣIjM=6jj~85Wl+ԃ喵3Wzv .pyu~n?aϼ}}.'v?q9ڰkf>ZLJq?zw|FOgNǽ_޻;oY\}kw w'K3gq5̏8?3rvw,w޳zڱkf?neqIVbZDcgžC~py[>sq!_fu?Ni.a3Y=^u:;#F[#eי \=!r}z |<*;Vyhe[.Ovuq?Y,_!;s>Q)n[q:S,tev#;on}O:Le}]/Wߟa~^fo9߾nϳs{ݿ#?/e%2fuY,r~z<ό[岍w;գ'uL^I/'ۯ,>y[ߵl^K|?}&NO=8>˧+mV;Yp/3}_f3f%k8)>N9Pv~9Nwe8-{}k?wNG_duL_2| zou,kY|:0UY/Y\{,;#̓}1l~{<|ܷ: O}CeO;q/nDzs̿]w|\ge~00Ŭ{3_:m/E.;l 3{|͎=gߙ9%ٹ˕E+kg03wq^?i5q1݀S+C~uy˷˃fy._c=&ϿyCw]_fxF޼N^gf)޿s32?e|]Y]07wn2>zhg3~dey90;8Q2g?or 3wY>ɷVȾ;~'e1_Ǔ}ůgC{{*sgy'zL=e) e-{?wϓ^Wr>z>ŵD%rɏɞGgu=<:ק2;ߣ|^CVxa~k}Е]^7^xC!sSON^2ܚ|j #>}I3E{0s poDx0{Ix0ƀ# ┡w=a4<0S1}M:aX"O6. 
)ak ="\W 7a؟" F pÉ>Cp&=}P!ׇ#}+}EWl}%Tdtӭ<CGDopzGH)GJΑb;VlNJX}D997F'=UBOק:T)3Ep)9gJ9=ޡ>vSI<(ľJbDNA+j~_[_Á>(%T+U>TBU*}޾ @嚇soU/E׾[?|uR~OO廚³5;|3;i4 .=ôŹw| );ӻiǤE!~oVf-{5Mgr{vp'eHttކ7#kkҗg #_G-j? W[l P곗߬\77 9xX:k_H|yY8x:_n+Yx5.oj#BƅK^@J1 \9 ~)uJ̸tbl]RR[1ܒ. >'8hLCLޖ7c8J)O6\q\ҟ+Yi~i n2}"/rEn/J'5rV#zMxE8W4BU?k14U8rz/ rIt_zA>6=T%fuk=\s~ah#5c<:И ]'gt?wlڸR}-kz>?Kr9! L"ġf^?!tv ]/b??yC<Ӽ@ZyBx5TKsBꮞkNjr^g ĀZx\wyroh\H|u EJk:Z;XHR|o]='|f2'QlϚ?wRz6vGsY=fs9z̩5l <]e\5;xΪ/~x<}Q^EEuh>̔29&GWX(5G7t%?xm=7z̋j@@xm[+|K;5Nr{w.O{+7͜ԌQk{kEcl-?ӳGk!ѿugnz/_47/feuk|<ᅌ~4^|TmM{hM[m?=^K?{;o_O{_+ ^{?e뤗/͗Hz[oo0zO\MASS/man/0000755000176000001440000000000013577102074011626 5ustar ripleyusersMASS/man/fractions.Rd0000644000176000001440000000433511754562034014113 0ustar ripleyusers% file MASS/man/fractions.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{fractions} \alias{fractions} \alias{Math.fractions} \alias{Ops.fractions} \alias{Summary.fractions} \alias{[.fractions} \alias{[<-.fractions} \alias{as.character.fractions} \alias{as.fractions} \alias{is.fractions} \alias{print.fractions} \alias{t.fractions} \title{ Rational Approximation } \description{ Find rational approximations to the components of a real numeric object using a standard continued fraction method. } \usage{ fractions(x, cycles = 10, max.denominator = 2000, \dots) } \arguments{ \item{x}{ Any object of mode numeric. Missing values are now allowed. } \item{cycles}{ The maximum number of steps to be used in the continued fraction approximation process. } \item{max.denominator}{ An early termination criterion. If any partial denominator exceeds \code{max.denominator} the continued fraction stops at that point. } \item{\dots}{ arguments passed to or from other methods. }} \value{ An object of class \code{"fractions"}. A structure with \code{.Data} component the same as the input numeric \code{x}, but with the rational approximations held as a character vector attribute, \code{"fracs"}. 
Arithmetic operations on \code{"fractions"} objects are possible. } \details{ Each component is first expanded in a continued fraction of the form \code{x = floor(x) + 1/(p1 + 1/(p2 + \dots)))} where \code{p1}, \code{p2}, \dots are positive integers, terminating either at \code{cycles} terms or when a \code{pj > max.denominator}. The continued fraction is then re-arranged to retrieve the numerator and denominator as integers. The numerators and denominators are then combined into a character vector that becomes the \code{"fracs"} attribute and used in printed representations. Arithmetic operations on \code{"fractions"} objects have full floating point accuracy, but the character representation printed out may not. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth Edition. Springer. } \seealso{ \code{\link{rational}} } \examples{ X <- matrix(runif(25), 5, 5) zapsmall(solve(X, X/5)) # print near-zeroes as zero fractions(solve(X, X/5)) fractions(solve(X, X/5)) + 1 } \keyword{math} MASS/man/leuk.Rd0000644000176000001440000000311711754562034013060 0ustar ripleyusers% file MASS/man/leuk.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{leuk} \alias{leuk} \title{ Survival Times and White Blood Counts for Leukaemia Patients } \description{ A data frame of data from 33 leukaemia patients. } \usage{ leuk } \format{ A data frame with columns: \describe{ \item{\code{wbc}}{ white blood count. } \item{\code{ag}}{ a test result, \code{"present"} or \code{"absent"}. } \item{\code{time}}{ survival time in weeks. } } } \details{ Survival times are given for 33 patients who died from acute myelogenous leukaemia. Also measured was the patient's white blood cell count at the time of diagnosis. The patients were also factored into 2 groups according to the presence or absence of a morphologic characteristic of white blood cells. 
Patients termed AG positive were identified by the presence of Auer rods and/or significant granulation of the leukaemic cells in the bone marrow at the time of diagnosis. } \source{ Cox, D. R. and Oakes, D. (1984) \emph{Analysis of Survival Data}. Chapman & Hall, p. 9. Taken from Feigl, P. & Zelen, M. (1965) Estimation of exponential survival probabilities with concomitant information. \emph{Biometrics} \bold{21}, 826--838. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \examples{ library(survival) plot(survfit(Surv(time) ~ ag, data = leuk), lty = 2:3, col = 2:3) # now Cox models leuk.cox <- coxph(Surv(time) ~ ag + log(wbc), leuk) summary(leuk.cox) } \keyword{datasets} MASS/man/lm.ridge.Rd0000644000176000001440000000500711754562034013621 0ustar ripleyusers% file MASS/man/lm.ridge.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{lm.ridge} \alias{lm.ridge} \alias{plot.ridgelm} \alias{print.ridgelm} \alias{select} \alias{select.ridgelm} \title{ Ridge Regression } \description{ Fit a linear model by ridge regression. } \usage{ lm.ridge(formula, data, subset, na.action, lambda = 0, model = FALSE, x = FALSE, y = FALSE, contrasts = NULL, \dots) } \arguments{ \item{formula}{ a formula expression as for regression models, of the form \code{response ~ predictors}. See the documentation of \code{formula} for other details. \code{\link{offset}} terms are allowed. } \item{data}{ an optional data frame in which to interpret the variables occurring in \code{formula}. } \item{subset}{ expression saying which subset of the rows of the data should be used in the fit. All observations are included by default. } \item{na.action}{ a function to filter missing data. } \item{lambda}{ A scalar or vector of ridge constants. } \item{model}{ should the model frame be returned? Not implemented. } \item{x}{ should the design matrix be returned? Not implemented. 
} \item{y}{ should the response be returned? Not implemented. } \item{contrasts}{ a list of contrasts to be used for some or all of factor terms in the formula. See the \code{contrasts.arg} of \code{\link{model.matrix.default}}. } \item{\dots}{ additional arguments to \code{\link{lm.fit}}. }} \details{ If an intercept is present in the model, its coefficient is not penalized. (If you want to penalize an intercept, put in your own constant term and remove the intercept.) } \value{ A list with components \item{coef}{ matrix of coefficients, one row for each value of \code{lambda}. Note that these are not on the original scale and are for use by the \code{\link{coef}} method. } \item{scales}{ scalings used on the X matrix. } \item{Inter}{ was intercept included? } \item{lambda}{ vector of lambda values } \item{ym}{ mean of \code{y} } \item{xm}{ column means of \code{x} matrix } \item{GCV}{ vector of GCV values } \item{kHKB}{ HKB estimate of the ridge constant. } \item{kLW}{ L-W estimate of the ridge constant. }} \references{ Brown, P. J. (1994) \emph{Measurement, Regression and Calibration} Oxford. } \seealso{ \code{\link{lm}} } \examples{ longley # not the same as the S-PLUS dataset names(longley)[1] <- "y" lm.ridge(y ~ ., longley) plot(lm.ridge(y ~ ., longley, lambda = seq(0,0.1,0.001))) select(lm.ridge(y ~ ., longley, lambda = seq(0,0.1,0.0001))) } \keyword{models} MASS/man/MASS-internal.Rd0000644000176000001440000000136111754562034014474 0ustar ripleyusers\name{MASS-internal} \alias{enlist} \alias{fbeta} \alias{frequency.polygon} \alias{nclass.freq} \alias{neg.bin} \alias{negexp.SSival} %\alias{pairs.profile} %\alias{plot.profile} %\alias{print.Anova} %\alias{print.abbrev} \title{Internal MASS functions} \description{ Internal MASS functions. 
} \usage{ enlist(vec) fbeta(x, alpha, beta) frequency.polygon(x, nclass = nclass.freq(x), xlab="", ylab="", \dots) nclass.freq(x) neg.bin(theta = stop("'theta' must be given")) negexp.SSival(mCall, data, LHS) %pairs.profile(x, colours = 2:3, \dots) %plot.profile(x, nseg, \dots) %print.Anova(x, \dots) } \details{ These are not intended to be called by the user. Some are for compatibility with earlier versions of MASS (the book). } \keyword{internal} MASS/man/UScrime.Rd0000644000176000001440000000422111754562034013464 0ustar ripleyusers% file MASS/man/UScrime.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{UScrime} \alias{UScrime} \title{ The Effect of Punishment Regimes on Crime Rates } \description{ Criminologists are interested in the effect of punishment regimes on crime rates. This has been studied using aggregate data on 47 states of the USA for 1960 given in this data frame. The variables seem to have been re-scaled to convenient numbers. } \usage{ UScrime } \format{ This data frame contains the following columns: \describe{ \item{\code{M}}{ percentage of males aged 14--24. } \item{\code{So}}{ indicator variable for a Southern state. } \item{\code{Ed}}{ mean years of schooling. } \item{\code{Po1}}{ police expenditure in 1960. } \item{\code{Po2}}{ police expenditure in 1959. } \item{\code{LF}}{ labour force participation rate. } \item{\code{M.F}}{ number of males per 1000 females. } \item{\code{Pop}}{ state population. } \item{\code{NW}}{ number of non-whites per 1000 people. } \item{\code{U1}}{ unemployment rate of urban males 14--24. } \item{\code{U2}}{ unemployment rate of urban males 35--39. } \item{\code{GDP}}{ gross domestic product per head. } \item{\code{Ineq}}{ income inequality. } \item{\code{Prob}}{ probability of imprisonment. } \item{\code{Time}}{ average time served in state prisons. } \item{\code{y}}{ rate of crimes in a particular category per head of population. } } } \source{ Ehrlich, I. 
(1973) Participation in illegitimate activities: a theoretical and empirical investigation. \emph{Journal of Political Economy}, \bold{81}, 521--565. Vandaele, W. (1978) Participation in illegitimate activities: Ehrlich revisited. In \emph{Deterrence and Incapacitation}, eds A. Blumstein, J. Cohen and D. Nagin, pp. 270--335. US National Academy of Sciences. } \references{ Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. } \keyword{datasets} MASS/man/contr.sdif.Rd0000644000176000001440000000255512315120516014163 0ustar ripleyusers\name{contr.sdif} \alias{contr.sdif} \title{ Successive Differences Contrast Coding } \description{ A coding for factors based on successive differences. } \usage{ contr.sdif(n, contrasts = TRUE, sparse = FALSE) } \arguments{ \item{n}{ The number of levels required. } \item{contrasts}{ logical: Should there be \code{n - 1} columns orthogonal to the mean (the default) or \code{n} columns spanning the space? } \item{sparse}{ logical. If true and the result would be sparse (only true for \code{contrasts = FALSE}), return a sparse matrix. } } \details{ The contrast coefficients are chosen so that the coded coefficients in a one-way layout are the differences between the means of the second and first levels, the third and second levels, and so on. This makes most sense for ordered factors, but does not assume that the levels are equally spaced. } \value{ If \code{contrasts} is \code{TRUE}, a matrix with \code{n} rows and \code{n - 1} columns, and the \code{n} by \code{n} identity matrix if \code{contrasts} is \code{FALSE}. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth Edition, Springer. } \seealso{ \code{\link{contr.treatment}}, \code{\link{contr.sum}}, \code{\link{contr.helmert}}. 
} \examples{ (A <- contr.sdif(6)) zapsmall(ginv(A)) } \keyword{models} MASS/man/rotifer.Rd0000644000176000001440000000172511754562034013575 0ustar ripleyusers% file MASS/man/rotifer.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{rotifer} \alias{rotifer} \title{ Numbers of Rotifers by Fluid Density } \description{ The data give the numbers of rotifers falling out of suspension for different fluid densities. There are two species, \code{pm} \emph{Polyartha major} and \code{kc}, \emph{Keratella cochlearis} and for each species the number falling out and the total number are given. } \usage{ rotifer } \format{ \describe{ \item{\code{density}}{ specific density of fluid. } \item{\code{pm.y}}{ number falling out for \emph{P. major}. } \item{\code{pm.total}}{ total number of \emph{P. major}. } \item{\code{kc.y}}{ number falling out for \emph{K. cochlearis}. } \item{\code{kc.tot}}{ total number of \emph{K. cochlearis}. } } } \source{ D. Collett (1991) \emph{Modelling Binary Data.} Chapman & Hall. p. 217 } \keyword{datasets} MASS/man/area.Rd0000644000176000001440000000272311754562034013032 0ustar ripleyusers% file MASS/man/area.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{area} \alias{area} \title{ Adaptive Numerical Integration } \description{ Integrate a function of one variable over a finite range using a recursive adaptive method. This function is mainly for demonstration purposes. } \usage{ area(f, a, b, \dots, fa = f(a, \dots), fb = f(b, \dots), limit = 10, eps = 1e-05) } \arguments{ \item{f}{ The integrand as an \code{S} function object. The variable of integration must be the first argument. } \item{a}{ Lower limit of integration. } \item{b}{ Upper limit of integration. } \item{\dots}{ Additional arguments needed by the integrand. } \item{fa}{ Function value at the lower limit. } \item{fb}{ Function value at the upper limit. } \item{limit}{ Limit on the depth to which recursion is allowed to go. 
} \item{eps}{ Error tolerance to control the process. }} \value{ The integral from \code{a} to \code{b} of \code{f(x)}. } \details{ The method divides the interval in two and compares the values given by Simpson's rule and the trapezium rule. If these are within eps of each other the Simpson's rule result is given, otherwise the process is applied separately to each half of the interval and the results added together. } \references{ Venables, W. N. and Ripley, B. D. (1994) \emph{Modern Applied Statistics with S-Plus.} Springer. pp. 105--110. } \examples{ area(sin, 0, pi) # integrate the sin function from 0 to pi. } \keyword{nonlinear} MASS/man/mammals.Rd0000644000176000001440000000176511754562034013556 0ustar ripleyusers% file MASS/man/mammals.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{mammals} \alias{mammals} \title{ Brain and Body Weights for 62 Species of Land Mammals } \description{ A data frame with average brain and body weights for 62 species of land mammals. } \usage{ mammals } \format{ \describe{ \item{\code{body}}{ body weight in kg. } \item{\code{brain}}{ brain weight in g. } \item{\code{name}}{ Common name of species. (Rock hyrax-a = \emph{Heterohyrax brucci}, Rock hyrax-b = \emph{Procavia habessinic.}.) } } } \source{ Weisberg, S. (1985) \emph{Applied Linear Regression.} 2nd edition. Wiley, pp. 144--5. Selected from: Allison, T. and Cicchetti, D. V. (1976) Sleep in mammals: ecological and constitutional correlates. \emph{Science} \bold{194}, 732--734. } \references{ Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. } \keyword{datasets} MASS/man/renumerate.Rd0000644000176000001440000000233011754562034014263 0ustar ripleyusers% file MASS/man/renumerate.Rd % copyright (C) 2000 W. N. Venables and B. D. 
Ripley % \name{renumerate} \alias{renumerate} \alias{renumerate.formula} \title{ Convert a Formula Transformed by 'denumerate' } \description{ \code{\link{denumerate}} converts a formula written using the conventions of \code{\link{loglm}} into one that \code{\link{terms}} is able to process. \code{renumerate} converts it back again to a form like the original. } \usage{ renumerate(x) } \arguments{ \item{x}{ A formula, normally as modified by \code{\link{denumerate}}. }} \value{ A formula where all variables with names of the form \code{.vn}, where \code{n} is an integer, converted to numbers, \code{n}, as allowed by the formula conventions of \code{\link{loglm}}. } \details{ This is an inverse function to \code{\link{denumerate}}. It is only needed since \code{\link{terms}} returns an expanded form of the original formula where the non-marginal terms are exposed. This expanded form is mapped back into a form corresponding to the one that the user originally supplied. } \seealso{ \code{\link{denumerate}} } \examples{ denumerate(~(1+2+3)^3 + a/b) ## ~ (.v1 + .v2 + .v3)^3 + a/b renumerate(.Last.value) ## ~ (1 + 2 + 3)^3 + a/b } \keyword{models} MASS/man/rlm.Rd0000644000176000001440000001503312331467127012711 0ustar ripleyusers% file MASS/man/rlm.Rd % copyright (C) 1998-2014 B. D. Ripley % \name{rlm} \alias{rlm} \alias{rlm.default} \alias{rlm.formula} \alias{print.rlm} \alias{predict.rlm} \alias{psi.bisquare} \alias{psi.hampel} \alias{psi.huber} \title{ Robust Fitting of Linear Models } \description{ Fit a linear model by robust regression using an M estimator. 
} \usage{ rlm(x, \dots) \method{rlm}{formula}(formula, data, weights, \dots, subset, na.action, method = c("M", "MM", "model.frame"), wt.method = c("inv.var", "case"), model = TRUE, x.ret = TRUE, y.ret = FALSE, contrasts = NULL) \method{rlm}{default}(x, y, weights, \dots, w = rep(1, nrow(x)), init = "ls", psi = psi.huber, scale.est = c("MAD", "Huber", "proposal 2"), k2 = 1.345, method = c("M", "MM"), wt.method = c("inv.var", "case"), maxit = 20, acc = 1e-4, test.vec = "resid", lqs.control = NULL) psi.huber(u, k = 1.345, deriv = 0) psi.hampel(u, a = 2, b = 4, c = 8, deriv = 0) psi.bisquare(u, c = 4.685, deriv = 0) } \arguments{ \item{formula}{ a formula of the form \code{y ~ x1 + x2 + \dots}. } \item{data}{ data frame from which variables specified in \code{formula} are preferentially to be taken. } \item{weights}{ a vector of prior weights for each case. } \item{subset}{ An index vector specifying the cases to be used in fitting. } \item{na.action}{ A function to specify the action to be taken if \code{NA}s are found. The \sQuote{factory-fresh} default action in \R is \code{\link{na.omit}}, and can be changed by \code{\link{options}(na.action=)}. } \item{x}{ a matrix or data frame containing the explanatory variables. } \item{y}{ the response: a vector of length the number of rows of \code{x}. } \item{method}{ currently either M-estimation or MM-estimation or (for the \code{formula} method only) find the model frame. MM-estimation is M-estimation with Tukey's biweight initialized by a specific S-estimator. See the \sQuote{Details} section. } \item{wt.method}{ are the weights case weights (giving the relative importance of case, so a weight of 2 means there are two of these) or the inverse of the variances, so a weight of two means this error is half as variable? } \item{model}{ should the model frame be returned in the object? } \item{x.ret}{ should the model matrix be returned in the object? } \item{y.ret}{ should the response be returned in the object? 
} \item{contrasts}{ optional contrast specifications: see \code{\link{lm}}. } \item{w}{ (optional) initial down-weighting for each case. } \item{init}{ (optional) initial values for the coefficients OR a method to find initial values OR the result of a fit with a \code{coef} component. Known methods are \code{"ls"} (the default) for an initial least-squares fit using weights \code{w*weights}, and \code{"lts"} for an unweighted least-trimmed squares fit with 200 samples. } \item{psi}{ the psi function is specified by this argument. It must give (possibly by name) a function \code{g(x, \dots, deriv)} that for \code{deriv=0} returns psi(x)/x and for \code{deriv=1} returns psi'(x). Tuning constants will be passed in via \code{\dots}. } \item{scale.est}{ method of scale estimation: re-scaled MAD of the residuals (default) or Huber's proposal 2 (which can be selected by either \code{"Huber"} or \code{"proposal 2"}). } \item{k2}{ tuning constant used for Huber proposal 2 scale estimation. } \item{maxit}{ the limit on the number of IWLS iterations. } \item{acc}{ the accuracy for the stopping criterion. } \item{test.vec}{ the stopping criterion is based on changes in this vector. } \item{\dots}{ additional arguments to be passed to \code{rlm.default} or to the \code{psi} function. } \item{lqs.control}{ An optional list of control values for \code{\link{lqs}}. } \item{u}{ numeric vector of evaluation points. } \item{k, a, b, c}{ tuning constants. } \item{deriv}{ \code{0} or \code{1}: compute values of the psi function or of its first derivative. } } \value{ An object of class \code{"rlm"} inheriting from \code{"lm"}. Note that the \code{df.residual} component is deliberately set to \code{NA} to avoid inappropriate estimation of the residual scale from the residual mean square by \code{"lm"} methods. 
The additional components not in an \code{lm} object are \item{s}{ the robust scale estimate used } \item{w}{ the weights used in the IWLS process } \item{psi}{ the psi function with parameters substituted } \item{conv}{ the convergence criteria at each iteration } \item{converged}{ did the IWLS converge? } \item{wresid}{ a working residual, weighted for \code{"inv.var"} weights only. } } \details{ Fitting is done by iterated re-weighted least squares (IWLS). Psi functions are supplied for the Huber, Hampel and Tukey bisquare proposals as \code{psi.huber}, \code{psi.hampel} and \code{psi.bisquare}. Huber's corresponds to a convex optimization problem and gives a unique solution (up to collinearity). The other two will have multiple local minima, and a good starting point is desirable. Selecting \code{method = "MM"} selects a specific set of options which ensures that the estimator has a high breakdown point. The initial set of coefficients and the final scale are selected by an S-estimator with \code{k0 = 1.548}; this gives (for \eqn{n \gg p}{n >> p}) breakdown point 0.5. The final estimator is an M-estimator with Tukey's biweight and fixed scale that will inherit this breakdown point provided \code{c > k0}; this is true for the default value of \code{c} that corresponds to 95\% relative efficiency at the normal. Case weights are not supported for \code{method = "MM"}. } \references{ P. J. Huber (1981) \emph{Robust Statistics}. Wiley. F. R. Hampel, E. M. Ronchetti, P. J. Rousseeuw and W. A. Stahel (1986) \emph{Robust Statistics: The Approach based on Influence Functions}. Wiley. A. Marazzi (1993) \emph{Algorithms, Routines and S Functions for Robust Statistics}. Wadsworth & Brooks/Cole. Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{lm}}, \code{\link{lqs}}. 
} \examples{ summary(rlm(stack.loss ~ ., stackloss)) rlm(stack.loss ~ ., stackloss, psi = psi.hampel, init = "lts") rlm(stack.loss ~ ., stackloss, psi = psi.bisquare) } \keyword{models} \keyword{robust} MASS/man/studres.Rd0000644000176000001440000000140011754562034013602 0ustar ripleyusers% file MASS/man/studres.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{studres} \alias{studres} \title{ Extract Studentized Residuals from a Linear Model } \description{ The Studentized residuals. Like standardized residuals, these are normalized to unit variance, but the Studentized version is fitted ignoring the current data point. (They are sometimes called jackknifed residuals). } \usage{ studres(object) } \arguments{ \item{object}{ any object representing a linear model. }} \value{ The vector of appropriately transformed residuals. } \seealso{ \code{\link{residuals}}, \code{\link{stdres}} } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{models} MASS/man/farms.Rd0000644000176000001440000000304711754562034013232 0ustar ripleyusers% file MASS/man/farms.Rd % copyright (C) 1998-9 W. N. Venables and B. D. Ripley % \name{farms} \alias{farms} \title{ Ecological Factors in Farm Management } \description{ The \code{farms} data frame has 20 rows and 4 columns. The rows are farms on the Dutch island of Terschelling and the columns are factors describing the management of grassland. } \usage{ farms } \format{ This data frame contains the following columns: \describe{ \item{\code{Mois}}{ Five levels of soil moisture -- level 3 does not occur at these 20 farms. } \item{\code{Manag}}{ Grassland management type (\code{SF} = standard, \code{BF} = biological, \code{HF} = hobby farming, \code{NM} = nature conservation). } \item{\code{Use}}{ Grassland use (\code{U1} = hay production, \code{U2} = intermediate, \code{U3} = grazing). 
} \item{\code{Manure}}{ Manure usage -- classes \code{C0} to \code{C4}. } } } \source{ J.C. Gower and D.J. Hand (1996) \emph{Biplots}. Chapman & Hall, Table 4.6. Quoted as from:\cr R.H.G. Jongman, C.J.F. ter Braak and O.F.R. van Tongeren (1987) \emph{Data Analysis in Community and Landscape Ecology.} PUDOC, Wageningen. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \examples{ farms.mca <- mca(farms, abbrev = TRUE) # Use levels as names eqscplot(farms.mca$cs, type = "n") text(farms.mca$rs, cex = 0.7) text(farms.mca$cs, labels = dimnames(farms.mca$cs)[[1]], cex = 0.7) } \keyword{datasets} MASS/man/profile.glm.Rd0000644000176000001440000000403511754562034014336 0ustar ripleyusers% file MASS/man/profile.glm.Rd % copyright (C) 1999-2008 W. N. Venables and B. D. Ripley \name{profile.glm} \alias{profile.glm} \title{Method for Profiling glm Objects} \description{ Investigates the profile log-likelihood function for a fitted model of class \code{"glm"}. } \usage{ \method{profile}{glm}(fitted, which = 1:p, alpha = 0.01, maxsteps = 10, del = zmax/5, trace = FALSE, \dots) } \arguments{ \item{fitted}{the original fitted model object.} \item{which}{the original model parameters which should be profiled. This can be a numeric or character vector. By default, all parameters are profiled.} \item{alpha}{highest significance level allowed for the profile t-statistics.} \item{maxsteps}{maximum number of points to be used for profiling each parameter.} \item{del}{suggested change on the scale of the profile t-statistics. Default value chosen to allow profiling at about 10 parameter values.} \item{trace}{logical: should the progress of profiling be reported?} \item{\dots}{further arguments passed to or from other methods.} } \value{ A list of classes \code{"profile.glm"} and \code{"profile"} with an element for each parameter being profiled. 
The elements are data-frames with two variables \item{par.vals}{a matrix of parameter values for each fitted model.} \item{tau}{the profile t-statistics.} } \details{ The profile t-statistic is defined as the square root of change in sum-of-squares divided by residual standard error with an appropriate sign. } \author{ Originally, D. M. Bates and W. N. Venables. (For S in 1996.) } \seealso{ \code{\link{glm}}, \code{\link{profile}}, \code{\link{plot.profile}} } \examples{ options(contrasts = c("contr.treatment", "contr.poly")) ldose <- rep(0:5, 2) numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16) sex <- factor(rep(c("M", "F"), c(6, 6))) SF <- cbind(numdead, numalive = 20 - numdead) budworm.lg <- glm(SF ~ sex*ldose, family = binomial) pr1 <- profile(budworm.lg) plot(pr1) pairs(pr1) } \keyword{regression} \keyword{models} MASS/man/ldahist.Rd0000644000176000001440000000362411754562034013553 0ustar ripleyusers% file MASS/man/ldahist.Rd % copyright (C) 1998-9 W. N. Venables and B. D. Ripley % \name{ldahist} \alias{ldahist} \title{ Histograms or Density Plots of Multiple Groups } \description{ Plot histograms or density plots of data on a single Fisher linear discriminant. } \usage{ ldahist(data, g, nbins = 25, h, x0 = - h/1000, breaks, xlim = range(breaks), ymax = 0, width, type = c("histogram", "density", "both"), sep = (type != "density"), col = 5, xlab = deparse(substitute(data)), bty = "n", \dots) } \arguments{ \item{data}{ vector of data. Missing values (\code{NA}s) are allowed and omitted. } \item{g}{ factor or vector giving groups, of the same length as \code{data}. } \item{nbins}{ Suggested number of bins to cover the whole range of the data. } \item{h}{ The bin width (takes precedence over \code{nbins}). } \item{x0}{ Shift for the bins - the breaks are at \code{x0 + h * (\dots, -1, 0, 1, \dots)} } \item{breaks}{ The set of breakpoints to be used. (Usually omitted, takes precedence over \code{h} and \code{nbins}). } \item{xlim}{ The limits for the x-axis. 
} \item{ymax}{ The upper limit for the y-axis. } \item{width}{ Bandwidth for density estimates. If missing, the Sheather-Jones selector is used for each group separately. } \item{type}{ Type of plot. } \item{sep}{ Whether there is a separate plot for each group, or one combined plot. } \item{col}{ The colour number for the bar fill. } \item{xlab}{ label for the plot x-axis. By default, this will be the name of \code{data}. } \item{bty}{ The box type for the plot - defaults to none. } \item{\dots}{ additional arguments to \code{polygon}. }} \section{Side Effects}{ Histogram and/or density plots are plotted on the current device. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{plot.lda}}. } \keyword{dplot} \keyword{hplot} MASS/man/stormer.Rd0000644000176000001440000000244211754562034013613 0ustar ripleyusers% file MASS/man/stormer.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{stormer} \alias{stormer} \title{ The Stormer Viscometer Data } \description{ The stormer viscometer measures the viscosity of a fluid by measuring the time taken for an inner cylinder in the mechanism to perform a fixed number of revolutions in response to an actuating weight. The viscometer is calibrated by measuring the time taken with varying weights while the mechanism is suspended in fluids of accurately known viscosity. The data comes from such a calibration, and theoretical considerations suggest a nonlinear relationship between time, weight and viscosity, of the form \code{Time = (B1*Viscosity)/(Weight - B2) + E} where \code{B1} and \code{B2} are unknown parameters to be estimated, and \code{E} is error. } \usage{ stormer } \format{ The data frame contains the following components: \describe{ \item{\code{Viscosity}}{ viscosity of fluid. } \item{\code{Wt}}{ actuating weight. } \item{\code{Time}}{ time taken. } } } \source{ E. J. 
Williams (1959) \emph{Regression Analysis.} Wiley. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/bcv.Rd0000644000176000001440000000154611754562034012676 0ustar ripleyusers% file MASS/man/bcv.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{bcv} \alias{bcv} \title{ Biased Cross-Validation for Bandwidth Selection } \description{ Uses biased cross-validation to select the bandwidth of a Gaussian kernel density estimator. } \usage{ bcv(x, nb = 1000, lower, upper) } \arguments{ \item{x}{ a numeric vector } \item{nb}{ number of bins to use. } \item{lower, upper}{ Range over which to minimize. The default is almost always satisfactory. }} \value{ a bandwidth } \references{ Scott, D. W. (1992) \emph{Multivariate Density Estimation: Theory, Practice, and Visualization.} Wiley. Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{ucv}}, \code{\link{width.SJ}}, \code{\link{density}} } \examples{ bcv(geyser$duration) } \keyword{dplot} MASS/man/Rabbit.Rd0000644000176000001440000000310611754562034013321 0ustar ripleyusers% file MASS/man/Rabbit.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{Rabbit} \alias{Rabbit} \title{ Blood Pressure in Rabbits } \description{ Five rabbits were studied on two occasions, after treatment with saline (control) and after treatment with the \eqn{5-HT_3} antagonist MDL 72222. After each treatment ascending doses of phenylbiguanide were injected intravenously at 10 minute intervals and the responses of mean blood pressure measured. The goal was to test whether the cardiogenic chemoreflex elicited by phenylbiguanide depends on the activation of \eqn{5-HT_3} receptors. 
} \usage{ Rabbit } \format{ This data frame contains 60 rows and the following variables: \describe{ \item{\code{BPchange}}{ change in blood pressure relative to the start of the experiment. } \item{\code{Dose}}{ dose of Phenylbiguanide in micrograms. } \item{\code{Run}}{ label of run (\code{"C1"} to \code{"C5"}, then \code{"M1"} to \code{"M5"}). } \item{\code{Treatment}}{ placebo or the \eqn{5-HT_3} antagonist MDL 72222. } \item{\code{Animal}}{ label of animal used (\code{"R1"} to \code{"R5"}). } } } \source{ J. Ludbrook (1994) Repeated measurements and multiple comparisons in cardiovascular research. \emph{Cardiovascular Research} \bold{28}, 303--311.\cr [The numerical data are not in the paper but were supplied by Professor Ludbrook] } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/forbes.Rd0000644000176000001440000000122011754562034013371 0ustar ripleyusers% file MASS/man/forbes.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{forbes} \alias{forbes} \title{ Forbes' Data on Boiling Points in the Alps } \description{ A data frame with 17 observations on boiling point of water and barometric pressure in inches of mercury. } \usage{ forbes } \format{ \describe{ \item{\code{bp}}{ boiling point (degrees Farenheit). } \item{\code{pres}}{ barometric pressure in inches of mercury. } } } \source{ A. C. Atkinson (1985) \emph{Plots, Transformations and Regression.} Oxford. S. Weisberg (1980) \emph{Applied Linear Regression.} Wiley. } \keyword{datasets} MASS/man/gamma.shape.glm.Rd0000644000176000001440000000431611754562034015061 0ustar ripleyusers% file MASS/man/gamma.shape.glm.Rd % copyright (C) 1994-9 W. N. Venables and B. D. 
Ripley % \name{gamma.shape} \alias{gamma.shape} \alias{gamma.shape.glm} \alias{print.gamma.shape} \title{ Estimate the Shape Parameter of the Gamma Distribution in a GLM Fit } \description{ Find the maximum likelihood estimate of the shape parameter of the gamma distribution after fitting a \code{Gamma} generalized linear model. } \usage{ \method{gamma.shape}{glm}(object, it.lim = 10, eps.max = .Machine$double.eps^0.25, verbose = FALSE, \dots) } \arguments{ \item{object}{ Fitted model object from a \code{Gamma} family or \code{quasi} family with \code{variance = "mu^2"}. } \item{it.lim}{ Upper limit on the number of iterations. } \item{eps.max}{ Maximum discrepancy between approximations for the iteration process to continue. } \item{verbose}{ If \code{TRUE}, causes successive iterations to be printed out. The initial estimate is taken from the deviance. } \item{\dots}{ further arguments passed to or from other methods. }} \value{ List of two components \item{alpha}{ the maximum likelihood estimate } \item{SE}{ the approximate standard error, the square-root of the reciprocal of the observed information. }} \details{ A glm fit for a Gamma family correctly calculates the maximum likelihood estimate of the mean parameters but provides only a crude estimate of the dispersion parameter. This function takes the results of the glm fit and solves the maximum likelihood equation for the reciprocal of the dispersion parameter, which is usually called the shape (or exponent) parameter. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. 
} \seealso{ \code{\link{gamma.dispersion}} } \examples{ clotting <- data.frame( u = c(5,10,15,20,30,40,60,80,100), lot1 = c(118,58,42,35,27,25,21,19,18), lot2 = c(69,35,26,21,18,16,13,12,12)) clot1 <- glm(lot1 ~ log(u), data = clotting, family = Gamma) gamma.shape(clot1) gm <- glm(Days + 0.1 ~ Age*Eth*Sex*Lrn, quasi(link=log, variance="mu^2"), quine, start = c(3, rep(0,31))) gamma.shape(gm, verbose = TRUE) summary(gm, dispersion = gamma.dispersion(gm)) # better summary } \keyword{models} MASS/man/summary.negbin.Rd0000644000176000001440000000336011754562034015056 0ustar ripleyusers% file MASS/man/summary.negbin.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{summary.negbin} \alias{summary.negbin} \alias{print.summary.negbin} \title{ Summary Method Function for Objects of Class 'negbin' } \description{ Identical to \code{summary.glm}, but with three lines of additional output: the ML estimate of theta, its standard error, and twice the log-likelihood function. } \usage{ \method{summary}{negbin}(object, dispersion = 1, correlation = FALSE, \dots) } \arguments{ \item{object}{ fitted model object of class \code{negbin} inheriting from \code{glm} and \code{lm}. Typically the output of \code{glm.nb}. } \item{dispersion}{ as for \code{summary.glm}, with a default of 1. } \item{correlation}{ as for \code{summary.glm}. } \item{\dots}{ arguments passed to or from other methods. }} \value{ As for \code{summary.glm}; the additional lines of output are not included in the resultant object. } \section{Side Effects}{ A summary table is produced as for \code{summary.glm}, with the additional information described above. } \details{ \code{summary.glm} is used to produce the majority of the output and supply the result. This function is a method for the generic function \code{summary()} for class \code{"negbin"}. 
It can be invoked by calling \code{summary(x)} for an object \code{x} of the appropriate class, or directly by calling \code{summary.negbin(x)} regardless of the class of the object. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{summary}}, \code{\link{glm.nb}}, \code{\link{negative.binomial}}, \code{\link{anova.negbin}} } \keyword{models} \examples{ summary(glm.nb(Days ~ Eth*Age*Lrn*Sex, quine, link = log)) } MASS/man/rational.Rd0000644000176000001440000000303711754562034013732 0ustar ripleyusers% file MASS/man/rational.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{rational} \alias{rational} \alias{.rat} \title{ Rational Approximation } \description{ Find rational approximations to the components of a real numeric object using a standard continued fraction method. } \usage{ rational(x, cycles = 10, max.denominator = 2000, \dots) } \arguments{ \item{x}{ Any object of mode numeric. Missing values are now allowed. } \item{cycles}{ The maximum number of steps to be used in the continued fraction approximation process. } \item{max.denominator}{ An early termination criterion. If any partial denominator exceeds \code{max.denominator} the continued fraction stops at that point. } \item{\dots}{ arguments passed to or from other methods. }} \value{ A numeric object with the same attributes as \code{x} but with entries rational approximations to the values. This effectively rounds relative to the size of the object and replaces very small entries by zero. } \details{ Each component is first expanded in a continued fraction of the form \code{x = floor(x) + 1/(p1 + 1/(p2 + \dots)))} where \code{p1}, \code{p2}, \dots are positive integers, terminating either at \code{cycles} terms or when a \code{pj > max.denominator}. The continued fraction is then re-arranged to retrieve the numerator and denominator as integers and the ratio returned as the value. 
} \seealso{ \code{\link{fractions}} } \examples{ X <- matrix(runif(25), 5, 5) zapsmall(solve(X, X/5)) # print near-zeroes as zero rational(solve(X, X/5)) } \keyword{math} MASS/man/gilgais.Rd0000644000176000001440000000373411754562034013544 0ustar ripleyusers% file MASS/man/gilgais.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{gilgais} \alias{gilgais} \title{ Line Transect of Soil in Gilgai Territory } \description{ This dataset was collected on a line transect survey in gilgai territory in New South Wales, Australia. Gilgais are natural gentle depressions in otherwise flat land, and sometimes seem to be regularly distributed. The data collection was stimulated by the question: are these patterns reflected in soil properties? At each of 365 sampling locations on a linear grid of 4 meters spacing, samples were taken at depths 0-10 cm, 30-40 cm and 80-90 cm below the surface. pH, electrical conductivity and chloride content were measured on a 1:5 soil:water extract from each sample. } \usage{ gilgais } \format{ This data frame contains the following columns: \describe{ \item{\code{pH00}}{ pH at depth 0--10 cm. } \item{\code{pH30}}{ pH at depth 30--40 cm. } \item{\code{pH80}}{ pH at depth 80--90 cm. } \item{\code{e00}}{ electrical conductivity in mS/cm (0--10 cm). } \item{\code{e30}}{ electrical conductivity in mS/cm (30--40 cm). } \item{\code{e80}}{ electrical conductivity in mS/cm (80--90 cm). } \item{\code{c00}}{ chloride content in ppm (0--10 cm). } \item{\code{c30}}{ chloride content in ppm (30--40 cm). } \item{\code{c80}}{ chloride content in ppm (80--90 cm). } } } \source{ Webster, R. (1977) Spectral analysis of gilgai soil. \emph{Australian Journal of Soil Research} \bold{15}, 191--204. Laslett, G. M. (1989) Kriging and splines: An empirical comparison of their predictive performance in some applications (with discussion). \emph{Journal of the American Statistical Association} \bold{89}, 319--409 } \references{ Venables, W. N. and Ripley, B. 
D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/cats.Rd0000644000176000001440000000161411754562034013052 0ustar ripleyusers% file MASS/man/cats.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{cats} \alias{cats} \title{ Anatomical Data from Domestic Cats } \description{ The heart and body weights of samples of male and female cats used for \emph{digitalis} experiments. The cats were all adult, over 2 kg body weight. } \usage{ cats } \format{ This data frame contains the following columns: \describe{ \item{\code{Sex}}{ sex: Factor with evels \code{"F"} and \code{"M"}. } \item{\code{Bwt}}{ body weight in kg. } \item{\code{Hwt}}{ heart weight in g. } } } \source{ R. A. Fisher (1947) The analysis of covariance method for the relation between a part and the whole, \emph{Biometrics} \bold{3}, 65--68. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/glm.convert.Rd0000644000176000001440000000214311754562034014354 0ustar ripleyusers% file MASS/man/glm.convert.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{glm.convert} \alias{glm.convert} \title{ Change a Negative Binomial fit to a GLM fit } \description{ This function modifies an output object from \code{glm.nb()} to one that looks like the output from \code{glm()} with a negative binomial family. This allows it to be updated keeping the theta parameter fixed. } \usage{ glm.convert(object) } \arguments{ \item{object}{ An object of class \code{"negbin"}, typically the output from \code{\link{glm.nb}()}. }} \value{ An object of class \code{"glm"} with negative binomial family. The theta parameter is then fixed at its present estimate. } \details{ Convenience function needed to effect some low level changes to the structure of the fitted model object. 
} \seealso{ \code{\link{glm.nb}}, \code{\link{negative.binomial}}, \code{\link{glm}} } \examples{ quine.nb1 <- glm.nb(Days ~ Sex/(Age + Eth*Lrn), data = quine) quine.nbA <- glm.convert(quine.nb1) quine.nbB <- update(quine.nb1, . ~ . + Sex:Age:Lrn) anova(quine.nbA, quine.nbB) } \keyword{regression} \keyword{models} MASS/man/Animals.Rd0000644000176000001440000000142611754562034013505 0ustar ripleyusers% file MASS/man/animals.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{Animals} \alias{Animals} \title{ Brain and Body Weights for 28 Species } \description{ Average brain and body weights for 28 species of land animals. } \usage{ Animals } \format{ \describe{ \item{\code{body}}{ body weight in kg. } \item{\code{brain}}{ brain weight in g. } } } \note{ The name \code{Animals} avoids conflicts with a system dataset \code{animals} in S-PLUS 4.5 and later. } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection.} Wiley, p. 57. } \references{ Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. } \keyword{datasets} MASS/man/deaths.Rd0000644000176000001440000000124711754562034013372 0ustar ripleyusers% file MASS/man/deaths.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{deaths} \alias{deaths} \title{ Monthly Deaths from Lung Diseases in the UK } \description{ A time series giving the monthly deaths from bronchitis, emphysema and asthma in the UK, 1974-1979, both sexes (\code{deaths}), } \usage{ deaths } \source{ P. J. Diggle (1990) \emph{Time Series: A Biostatistical Introduction.} Oxford, table A.3 } \seealso{ This the same as dataset \code{\link{ldeaths}} in \R's \pkg{datasets} package. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. 
} \keyword{datasets} MASS/man/Pima.tr.Rd0000644000176000001440000000406011754562034013430 0ustar ripleyusers% file MASS/man/Pima.tr.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{Pima.tr} \alias{Pima.tr} \alias{Pima.tr2} \alias{Pima.te} \title{ Diabetes in Pima Indian Women } \description{ A population of women who were at least 21 years old, of Pima Indian heritage and living near Phoenix, Arizona, was tested for diabetes according to World Health Organization criteria. The data were collected by the US National Institute of Diabetes and Digestive and Kidney Diseases. We used the 532 complete records after dropping the (mainly missing) data on serum insulin. } \usage{ Pima.tr Pima.tr2 Pima.te } \format{ These data frames contains the following columns: \describe{ \item{\code{npreg}}{ number of pregnancies. } \item{\code{glu}}{ plasma glucose concentration in an oral glucose tolerance test. } \item{\code{bp}}{ diastolic blood pressure (mm Hg). } \item{\code{skin}}{ triceps skin fold thickness (mm). } \item{\code{bmi}}{ body mass index (weight in kg/(height in m)\eqn{^2}{\^2}). } \item{\code{ped}}{ diabetes pedigree function. } \item{\code{age}}{ age in years. } \item{\code{type}}{ \code{Yes} or \code{No}, for diabetic according to WHO criteria. } } } \details{ The training set \code{Pima.tr} contains a randomly selected set of 200 subjects, and \code{Pima.te} contains the remaining 332 subjects. \code{Pima.tr2} contains \code{Pima.tr} plus 100 subjects with missing values in the explanatory variables. } \source{ Smith, J. W., Everhart, J. E., Dickson, W. C., Knowler, W. C. and Johannes, R. S. (1988) Using the ADAP learning algorithm to forecast the onset of \emph{diabetes mellitus}. In \emph{Proceedings of the Symposium on Computer Applications in Medical Care (Washington, 1988),} ed. R. A. Greenes, pp. 261--265. Los Alamitos, CA: IEEE Computer Society Press. Ripley, B.D. 
(1996) \emph{Pattern Recognition and Neural Networks.} Cambridge: Cambridge University Press. } \keyword{datasets} MASS/man/glm.nb.Rd0000644000176000001440000000737511754562034013307 0ustar ripleyusers% file MASS/man/glm.nb.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{glm.nb} \alias{glm.nb} \alias{family.negbin} \alias{logLik.negbin} \title{ Fit a Negative Binomial Generalized Linear Model } \description{ A modification of the system function \code{\link{glm}()} to include estimation of the additional parameter, \code{theta}, for a Negative Binomial generalized linear model. } %\usage{ %glm.nb(formula, \dots, init.theta, link = log) %} \usage{ glm.nb(formula, data, weights, subset, na.action, start = NULL, etastart, mustart, control = glm.control(...), method = "glm.fit", model = TRUE, x = FALSE, y = TRUE, contrasts = NULL, ..., init.theta, link = log) } \arguments{ \item{formula, data, weights, subset, na.action, start, etastart, mustart, control, method, model, x, y, contrasts, \dots}{ arguments for the \code{\link{glm}()} function. Note that these exclude \code{family} and \code{offset} (but \code{\link{offset}()} can be used). } \item{init.theta}{ Optional initial value for the theta parameter. If omitted a moment estimator after an initial fit using a Poisson GLM is used. } \item{link}{ The link function. Currently must be one of \code{log}, \code{sqrt} or \code{identity}. }} \value{ A fitted model object of class \code{negbin} inheriting from \code{glm} and \code{lm}. The object is like the output of \code{glm} but contains three additional components, namely \code{theta} for the ML estimate of theta, \code{SE.theta} for its approximate standard error (using observed rather than expected information), and \code{twologlik} for twice the log-likelihood function. } \details{ An alternating iteration process is used. For given \code{theta} the GLM is fitted using the same process as used by \code{glm()}. 
For fixed means the \code{theta} parameter is estimated using score and information iterations. The two are alternated until convergence of both. (The number of alternations and the number of iterations when estimating \code{theta} are controlled by the \code{maxit} parameter of \code{glm.control}.) Setting \code{trace > 0} traces the alternating iteration process. Setting \code{trace > 1} traces the \code{glm} fit, and setting \code{trace > 2} traces the estimation of \code{theta}. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{glm}}, \code{\link{negative.binomial}}, \code{\link{anova.negbin}}, \code{\link{summary.negbin}}, \code{\link{theta.md}} There is a \code{\link{simulate}} method. } \examples{ quine.nb1 <- glm.nb(Days ~ Sex/(Age + Eth*Lrn), data = quine) quine.nb2 <- update(quine.nb1, . ~ . + Sex:Age:Lrn) quine.nb3 <- update(quine.nb2, Days ~ .^4) anova(quine.nb1, quine.nb2, quine.nb3) \dontshow{## PR#1695 y <- c(7, 5, 4, 7, 5, 2, 11, 5, 5, 4, 2, 3, 4, 3, 5, 9, 6, 7, 10, 6, 12, 6, 3, 5, 3, 9, 13, 0, 6, 1, 2, 0, 1, 0, 0, 4, 5, 1, 5, 3, 3, 4) lag1 <- c(0, 7, 5, 4, 7, 5, 2, 11, 5, 5, 4, 2, 3, 4, 3, 5, 9, 6, 7, 10, 6, 12, 6, 3, 5, 3, 9, 13, 0, 6, 1, 2, 0, 1, 0, 0, 4, 5, 1, 5, 3, 3) lag2 <- c(0, 0, 7, 5, 4, 7, 5, 2, 11, 5, 5, 4, 2, 3, 4, 3, 5, 9, 6, 7, 10, 6, 12, 6, 3, 5, 3, 9, 13, 0, 6, 1, 2, 0, 1, 0, 0, 4, 5, 1, 5, 3) lag3 <- c(0, 0, 0, 7, 5, 4, 7, 5, 2, 11, 5, 5, 4, 2, 3, 4, 3, 5, 9, 6, 7, 10, 6, 12, 6, 3, 5, 3, 9, 13, 0, 6, 1, 2, 0, 1, 0, 0, 4, 5, 1, 5) (fit <- glm(y ~ lag1+lag2+lag3, family=poisson(link=identity), start=c(2, 0.1, 0.1, 0.1))) try(glm.nb(y ~ lag1+lag2+lag3, link=identity)) glm.nb(y ~ lag1+lag2+lag3, link=identity, start=c(2, 0.1, 0.1, 0.1)) glm.nb(y ~ lag1+lag2+lag3, link=identity, start=coef(fit)) glm.nb(y ~ lag1+lag2+lag3, link=identity, etastart=rep(5, 42)) }} \keyword{regression} \keyword{models} 
MASS/man/drivers.Rd0000644000176000001440000000131011754562034013567 0ustar ripleyusers% file MASS/man/drivers.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{drivers} \alias{drivers} \title{ Deaths of Car Drivers in Great Britain 1969-84 } \description{ A regular time series giving the monthly totals of car drivers in Great Britain killed or seriously injured Jan 1969 to Dec 1984. Compulsory wearing of seat belts was introduced on 31 Jan 1983 } \usage{ drivers } \source{ Harvey, A.C. (1989) \emph{Forecasting, Structural Time Series Models and the Kalman Filter.} Cambridge University Press, pp. 519--523. } \references{ Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. } \keyword{datasets} MASS/man/mcycle.Rd0000644000176000001440000000146211754562034013375 0ustar ripleyusers% file MASS/man/mcycle.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{mcycle} \alias{mcycle} \title{ Data from a Simulated Motorcycle Accident } \description{ A data frame giving a series of measurements of head acceleration in a simulated motorcycle accident, used to test crash helmets. } \usage{ mcycle } \format{ \describe{ \item{\code{times}}{ in milliseconds after impact. } \item{\code{accel}}{ in g. } } } \source{ Silverman, B. W. (1985) Some aspects of the spline smoothing approach to non-parametric curve fitting. \emph{Journal of the Royal Statistical Society series B} \bold{47}, 1--52. } \references{ Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. } \keyword{datasets} MASS/man/Null.Rd0000644000176000001440000000230712412216712013021 0ustar ripleyusers% file MASS/man/Null.Rd % copyright (C) 1994-2014 W. N. Venables and B. D. Ripley % \name{Null} \alias{Null} \title{ Null Spaces of Matrices } \description{ Given a matrix, \code{M}, find a matrix \code{N} giving a basis for the (left) null space. 
That is \code{crossprod(N, M) = t(N) \%*\% M} is an all-zero matrix and \code{N} has the maximum number of linearly independent columns. } \usage{ Null(M) } \arguments{ \item{M}{ Input matrix. A vector is coerced to a 1-column matrix. } } \details{ For a basis for the (right) null space \eqn{\{x : Mx = 0\}}{{x : Mx = 0}}, use \code{Null(t(M))}. } \value{ The matrix \code{N} with the basis for the (left) null space, or a matrix with zero columns if the matrix \code{M} is square and of maximal rank. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ % avoid link to Matrix \code{\link[base]{qr}}, \code{\link{qr.Q}}. } \examples{ # The function is currently defined as function(M) { tmp <- qr(M) set <- if(tmp$rank == 0L) seq_len(ncol(M)) else -seq_len(tmp$rank) qr.Q(tmp, complete = TRUE)[, set, drop = FALSE] } } \keyword{algebra} MASS/man/whiteside.Rd0000644000176000001440000000421411754562034014104 0ustar ripleyusers% file MASS/man/whiteside.Rd % copyright (C) 1999 W. N. Venables and B. D. Ripley % \name{whiteside} \alias{whiteside} \title{ House Insulation: Whiteside's Data } \description{ Mr Derek Whiteside of the UK Building Research Station recorded the weekly gas consumption and average external temperature at his own house in south-east England for two heating seasons, one of 26 weeks before, and one of 30 weeks after cavity-wall insulation was installed. The object of the exercise was to assess the effect of the insulation on gas consumption. } \usage{ whiteside } \format{ The \code{whiteside} data frame has 56 rows and 3 columns.: \describe{ \item{\code{Insul}}{ A factor, before or after insulation. } \item{\code{Temp}}{ Purportedly the average outside temperature in degrees Celsius. (These values is far too low for any 56-week period in the 1960s in South-East England. It might be the weekly average of daily minima.) 
} \item{\code{Gas}}{ The weekly gas consumption in 1000s of cubic feet. }}} \source{ A data set collected in the 1960s by Mr Derek Whiteside of the UK Building Research Station. Reported by Hand, D. J., Daly, F., McConway, K., Lunn, D. and Ostrowski, E. eds (1993) \emph{A Handbook of Small Data Sets.} Chapman & Hall, p. 69. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \examples{ require(lattice) xyplot(Gas ~ Temp | Insul, whiteside, panel = function(x, y, ...) { panel.xyplot(x, y, ...) panel.lmline(x, y, ...) }, xlab = "Average external temperature (deg. C)", ylab = "Gas consumption (1000 cubic feet)", aspect = "xy", strip = function(...) strip.default(..., style = 1)) gasB <- lm(Gas ~ Temp, whiteside, subset = Insul=="Before") gasA <- update(gasB, subset = Insul=="After") summary(gasB) summary(gasA) gasBA <- lm(Gas ~ Insul/Temp - 1, whiteside) summary(gasBA) gasQ <- lm(Gas ~ Insul/(Temp + I(Temp^2)) - 1, whiteside) coef(summary(gasQ)) gasPR <- lm(Gas ~ Insul + Temp, whiteside) anova(gasPR, gasBA) options(contrasts = c("contr.treatment", "contr.poly")) gasBA1 <- lm(Gas ~ Insul*Temp, whiteside) coef(summary(gasBA1)) } \keyword{datasets} MASS/man/cov.rob.Rd0000644000176000001440000001112111754562034013462 0ustar ripleyusers% file lqs/man/cov.rob.Rd % copyright (C) 1998-9 B. D. Ripley % \name{cov.rob} \alias{cov.rob} \alias{cov.mve} \alias{cov.mcd} \title{ Resistant Estimation of Multivariate Location and Scatter } \description{ Compute a multivariate location and scale estimate with a high breakdown point -- this can be thought of as estimating the mean and covariance of the \code{good} part of the data. \code{cov.mve} and \code{cov.mcd} are compatibility wrappers. } \usage{ cov.rob(x, cor = FALSE, quantile.used = floor((n + p + 1)/2), method = c("mve", "mcd", "classical"), nsamp = "best", seed) cov.mve(\dots) cov.mcd(\dots) } \arguments{ \item{x}{ a matrix or data frame. 
} \item{cor}{ should the returned result include a correlation matrix? } \item{quantile.used}{ the minimum number of the data points regarded as \code{good} points. } \item{method}{ the method to be used -- minimum volume ellipsoid, minimum covariance determinant or classical product-moment. Using \code{cov.mve} or \code{cov.mcd} forces \code{mve} or \code{mcd} respectively. } \item{nsamp}{ the number of samples or \code{"best"} or \code{"exact"} or \code{"sample"}. If \code{"sample"} the number chosen is \code{min(5*p, 3000)}, taken from Rousseeuw and Hubert (1997). If \code{"best"} exhaustive enumeration is done up to 5000 samples: if \code{"exact"} exhaustive enumeration will be attempted however many samples are needed. } \item{seed}{ the seed to be used for random sampling: see \code{\link{RNGkind}}. The current value of \code{.Random.seed} will be preserved if it is set. } \item{\dots}{arguments to \code{cov.rob} other than \code{method}.} } \value{ A list with components \item{center}{ the final estimate of location. } \item{cov}{ the final estimate of scatter. } \item{cor}{ (only is \code{cor = TRUE}) the estimate of the correlation matrix. } \item{sing}{ message giving number of singular samples out of total } \item{crit}{ the value of the criterion on log scale. For MCD this is the determinant, and for MVE it is proportional to the volume. } \item{best}{ the subset used. For MVE the best sample, for MCD the best set of size \code{quantile.used}. } \item{n.obs}{ total number of observations. }} \details{ For method \code{"mve"}, an approximate search is made of a subset of size \code{quantile.used} with an enclosing ellipsoid of smallest volume; in method \code{"mcd"} it is the volume of the Gaussian confidence ellipsoid, equivalently the determinant of the classical covariance matrix, that is minimized. The mean of the subset provides a first estimate of the location, and the rescaled covariance matrix a first estimate of scatter. 
The Mahalanobis distances of all the points from the location estimate for this covariance matrix are calculated, and those points within the 97.5\% point under Gaussian assumptions are declared to be \code{good}. The final estimates are the mean and rescaled covariance of the \code{good} points. The rescaling is by the appropriate percentile under Gaussian data; in addition the first covariance matrix has an \emph{ad hoc} finite-sample correction given by Marazzi. For method \code{"mve"} the search is made over ellipsoids determined by the covariance matrix of \code{p} of the data points. For method \code{"mcd"} an additional improvement step suggested by Rousseeuw and van Driessen (1999) is used, in which once a subset of size \code{quantile.used} is selected, an ellipsoid based on its covariance is tested (as this will have no larger a determinant, and may be smaller). } \references{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection.} Wiley. A. Marazzi (1993) \emph{Algorithms, Routines and S Functions for Robust Statistics.} Wadsworth and Brooks/Cole. P. J. Rousseeuw and B. C. van Zomeren (1990) Unmasking multivariate outliers and leverage points, \emph{Journal of the American Statistical Association}, \bold{85}, 633--639. P. J. Rousseeuw and K. van Driessen (1999) A fast algorithm for the minimum covariance determinant estimator. \emph{Technometrics} \bold{41}, 212--223. P. Rousseeuw and M. Hubert (1997) Recent developments in PROGRESS. In \emph{L1-Statistical Procedures and Related Topics } ed Y. Dodge, IMS Lecture Notes volume \bold{31}, pp. 201--214. } \seealso{ \code{\link{lqs}} } \examples{ set.seed(123) cov.rob(stackloss) cov.rob(stack.x, method = "mcd", nsamp = "exact") } \keyword{robust} \keyword{multivariate} MASS/man/cov.trob.Rd0000644000176000001440000000426411754562034013660 0ustar ripleyusers% file MASS/man/cov.trob.Rd % copyright (C) 1997-9 W. N. Venables and B. D. 
Ripley % \name{cov.trob} \alias{cov.trob} \title{ Covariance Estimation for Multivariate t Distribution } \description{ Estimates a covariance or correlation matrix assuming the data came from a multivariate t distribution: this provides some degree of robustness to outlier without giving a high breakdown point. } \usage{ cov.trob(x, wt = rep(1, n), cor = FALSE, center = TRUE, nu = 5, maxit = 25, tol = 0.01) } \arguments{ \item{x}{ data matrix. Missing values (NAs) are not allowed. } \item{wt}{ A vector of weights for each case: these are treated as if the case \code{i} actually occurred \code{wt[i]} times. } \item{cor}{ Flag to choose between returning the correlation (\code{cor = TRUE}) or covariance (\code{cor = FALSE}) matrix. } \item{center}{ a logical value or a numeric vector providing the location about which the covariance is to be taken. If \code{center = FALSE}, no centering is done; if \code{center = TRUE} the MLE of the location vector is used. } \item{nu}{ \sQuote{degrees of freedom} for the multivariate t distribution. Must exceed 2 (so that the covariance matrix is finite). } \item{maxit}{ Maximum number of iterations in fitting. } \item{tol}{ Convergence tolerance for fitting. }} \value{ A list with the following components \item{cov}{ the fitted covariance matrix. } \item{center}{ the estimated or specified location vector. } \item{wt}{ the specified weights: only returned if the \code{wt} argument was given. } \item{n.obs}{ the number of cases used in the fitting. } \item{cor}{ the fitted correlation matrix: only returned if \code{cor = TRUE}. } \item{call}{ The matched call. } \item{iter}{ The number of iterations used. }} \references{ J. T. Kent, D. E. Tyler and Y. Vardi (1994) A curious likelihood identity for the multivariate t-distribution. \emph{Communications in Statistics---Simulation and Computation} \bold{23}, 441--453. Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. 
} \seealso{ \code{\link{cov}}, \code{\link{cov.wt}}, \code{\link{cov.mve}} } \examples{ cov.trob(stackloss) } \keyword{multivariate} MASS/man/eqscplot.Rd0000644000176000001440000000333511754562034013754 0ustar ripleyusers% file MASS/man/eqscplot.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{eqscplot} \alias{eqscplot} \title{ Plots with Geometrically Equal Scales } \description{ Version of a scatterplot with scales chosen to be equal on both axes, that is 1cm represents the same units on each } \usage{ eqscplot(x, y, ratio = 1, tol = 0.04, uin, \dots) } \arguments{ \item{x}{ vector of x values, or a 2-column matrix, or a list with components \code{x} and \code{y} } \item{y}{ vector of y values } \item{ratio}{ desired ratio of units on the axes. Units on the y axis are drawn at \code{ratio} times the size of units on the x axis. Ignored if \code{uin} is specified and of length 2. } \item{tol}{ proportion of white space at the margins of plot } \item{uin}{ desired values for the units-per-inch parameter. If of length 1, the desired units per inch on the x axis. } \item{\dots}{ further arguments for \code{plot} and graphical parameters. Note that \code{par(xaxs="i", yaxs="i")} is enforced, and \code{xlim} and \code{ylim} will be adjusted accordingly. }} \value{ invisibly, the values of \code{uin} used for the plot. } \section{Side Effects}{ performs the plot. } \details{ Limits for the x and y axes are chosen so that they include the data. One of the sets of limits is then stretched from the midpoint to make the units in the ratio given by \code{ratio}. Finally both are stretched by \code{1 + tol} to move points away from the axes, and the points plotted. } \note{ Arguments \code{ratio} and \code{uin} were suggested by Bill Dunlap. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. 
} \seealso{ \code{\link{plot}}, \code{\link{par}} } \keyword{hplot} MASS/man/anorexia.Rd0000644000176000001440000000214711754562034013730 0ustar ripleyusers% file MASS/man/anorexia.Rd % copyright (C) 1999 W. N. Venables and B. D. Ripley % \name{anorexia} \alias{anorexia} \title{ Anorexia Data on Weight Change } \description{ The \code{anorexia} data frame has 72 rows and 3 columns. Weight change data for young female anorexia patients. } \usage{ anorexia } \format{ This data frame contains the following columns: \describe{ \item{\code{Treat}}{ Factor of three levels: \code{"Cont"} (control), \code{"CBT"} (Cognitive Behavioural treatment) and \code{"FT"} (family treatment). } \item{\code{Prewt}}{ Weight of patient before study period, in lbs. } \item{\code{Postwt}}{ Weight of patient after study period, in lbs. } } } \source{ Hand, D. J., Daly, F., McConway, K., Lunn, D. and Ostrowski, E. eds (1993) \emph{A Handbook of Small Data Sets.} Chapman & Hall, Data set 285 (p. 229) (Note that the original source mistakenly says that weights are in kg.) } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/eagles.Rd0000644000176000001440000000261111754562034013356 0ustar ripleyusers% file MASS/man/eagles.Rd % copyright (C) 1999 W. N. Venables and B. D. Ripley % \name{eagles} \alias{eagles} \title{ Foraging Ecology of Bald Eagles } \description{ Knight and Skagen collected during a field study on the foraging behaviour of wintering Bald Eagles in Washington State, USA data concerning 160 attempts by one (pirating) Bald Eagle to steal a chum salmon from another (feeding) Bald Eagle. } \usage{ eagles } \format{ The \code{eagles} data frame has 8 rows and 5 columns. \describe{ \item{\code{y}}{ Number of successful attempts. } \item{\code{n}}{ Total number of attempts. } \item{\code{P}}{ Size of pirating eagle (\code{L} = large, \code{S} = small). 
} \item{\code{A}}{ Age of pirating eagle (\code{I} = immature, \code{A} = adult). } \item{\code{V}}{ Size of victim eagle (\code{L} = large, \code{S} = small). } } } \source{ Knight, R. L. and Skagen, S. K. (1988) Agonistic asymmetries and the foraging ecology of Bald Eagles. \emph{Ecology} \bold{69}, 1188--1194. } \references{ Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. } \examples{ eagles.glm <- glm(cbind(y, n - y) ~ P*A + V, data = eagles, family = binomial) dropterm(eagles.glm) prof <- profile(eagles.glm) plot(prof) pairs(prof) } \keyword{datasets} MASS/man/bandwidth.nrd.Rd0000644000176000001440000000132511754562034014645 0ustar ripleyusers\name{bandwidth.nrd} \alias{bandwidth.nrd} \title{ Bandwidth for density() via Normal Reference Distribution } \description{ A well-supported rule-of-thumb for choosing the bandwidth of a Gaussian kernel density estimator. } \usage{ bandwidth.nrd(x) } \arguments{ \item{x}{ A data vector. }} \value{ A bandwidth on a scale suitable for the \code{width} argument of \code{density}. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Springer, equation (5.5) on page 130. } \examples{ # The function is currently defined as function(x) { r <- quantile(x, c(0.25, 0.75)) h <- (r[2] - r[1])/1.34 4 * 1.06 * min(sqrt(var(x)), h) * length(x)^(-1/5) } } \keyword{dplot} MASS/man/mca.Rd0000644000176000001440000000262011754562034012656 0ustar ripleyusers% file MASS/man/mca.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{mca} \alias{mca} \alias{print.mca} \title{ Multiple Correspondence Analysis } \description{ Computes a multiple correspondence analysis of a set of factors. } \usage{ mca(df, nf = 2, abbrev = FALSE) } \arguments{ \item{df}{ A data frame containing only factors } \item{nf}{ The number of dimensions for the MCA. Rarely 3 might be useful. } \item{abbrev}{ Should the vertex names be abbreviated? 
By default these are of the form \sQuote{factor.level} but if \code{abbrev = TRUE} they are just \sQuote{level} which will suffice if the factors have distinct levels. }} \value{ An object of class \code{"mca"}, with components \item{rs}{ The coordinates of the rows, in \code{nf} dimensions. } \item{cs}{ The coordinates of the column vertices, one for each level of each factor. } \item{fs}{ Weights for each row, used to interpolate additional factors in \code{predict.mca}. } \item{p}{ The number of factors } \item{d}{ The singular values for the \code{nf} dimensions. } \item{call}{ The matched call. }} \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{predict.mca}}, \code{\link{plot.mca}}, \code{\link{corresp}} } \examples{ farms.mca <- mca(farms, abbrev=TRUE) farms.mca plot(farms.mca) } \keyword{category} \keyword{multivariate} MASS/man/SP500.Rd0000644000176000001440000000067011754562034012670 0ustar ripleyusers\name{SP500} \alias{SP500} \title{ Returns of the Standard and Poors 500 } \description{ Returns of the Standard and Poors 500 Index in the 1990's } \usage{ SP500 } \format{ A vector of returns of the Standard and Poors 500 index for all the trading days in 1990, 1991, \dots, 1999. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/Insurance.Rd0000644000176000001440000000335411754562034014052 0ustar ripleyusers% file MASS/man/Insurance.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{Insurance} \alias{Insurance} \title{ Numbers of Car Insurance claims } \description{ The data given in data frame \code{Insurance} consist of the numbers of policyholders of an insurance company who were exposed to risk, and the numbers of car insurance claims made by those policyholders in the third quarter of 1973. 
} \usage{ Insurance } \format{ This data frame contains the following columns: \describe{ \item{\code{District}}{ factor: district of residence of policyholder (1 to 4): 4 is major cities. } \item{\code{Group}}{ an ordered factor: group of car with levels <1 litre, 1--1.5 litre, 1.5--2 litre, >2 litre. } \item{\code{Age}}{ an ordered factor: the age of the insured in 4 groups labelled <25, 25--29, 30--35, >35. } \item{\code{Holders}}{ numbers of policyholders. } \item{\code{Claims}}{ numbers of claims } } } \source{ L. A. Baxter, S. M. Coutts and G. A. F. Ross (1980) Applications of linear models in motor insurance. \emph{Proceedings of the 21st International Congress of Actuaries, Zurich} pp. 11--29. M. Aitkin, D. Anderson, B. Francis and J. Hinde (1989) \emph{Statistical Modelling in GLIM.} Oxford University Press. } \references{ Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. } \examples{ ## main-effects fit as Poisson GLM with offset glm(Claims ~ District + Group + Age + offset(log(Holders)), data = Insurance, family = poisson) # same via loglm loglm(Claims ~ District + Group + Age + offset(log(Holders)), data = Insurance) } \keyword{datasets} MASS/man/GAGurine.Rd0000644000176000001440000000167311754562034013566 0ustar ripleyusers% file MASS/man/GAGurine.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{GAGurine} \alias{GAGurine} \title{ Level of GAG in Urine of Children } \description{ Data were collected on the concentration of a chemical GAG in the urine of 314 children aged from zero to seventeen years. The aim of the study was to produce a chart to help a paediatrican to assess if a child's GAG concentration is \sQuote{normal}. } \usage{ GAGurine } \format{ This data frame contains the following columns: \describe{ \item{\code{Age}}{ age of child in years. } \item{\code{GAG}}{ concentration of GAG (the units have been lost). 
} } } \source{ Mrs Susan Prosser, Paediatrics Department, University of Oxford, via Department of Statistics Consulting Service. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/quine.Rd0000644000176000001440000000257111754562034013244 0ustar ripleyusers% file MASS/man/quine.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{quine} \alias{quine} \title{ Absenteeism from School in Rural New South Wales } \description{ The \code{quine} data frame has 146 rows and 5 columns. Children from Walgett, New South Wales, Australia, were classified by Culture, Age, Sex and Learner status and the number of days absent from school in a particular school year was recorded. } \usage{ quine } \format{ This data frame contains the following columns: \describe{ \item{\code{Eth}}{ ethnic background: Aboriginal or Not, (\code{"A"} or \code{"N"}). } \item{\code{Sex}}{ sex: factor with levels (\code{"F"} or \code{"M"}). } \item{\code{Age}}{ age group: Primary (\code{"F0"}), or forms \code{"F1,"} \code{"F2"} or \code{"F3"}. } \item{\code{Lrn}}{ learner status: factor with levels Average or Slow learner, (\code{"AL"} or \code{"SL"}). } \item{\code{Days}}{ days absent from school in the year. } } } \source{ S. Quine, quoted in Aitkin, M. (1978) The analysis of unbalanced cross classifications (with discussion). \emph{Journal of the Royal Statistical Society series A} \bold{141}, 195--223. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/rnegbin.Rd0000644000176000001440000000235611754562034013550 0ustar ripleyusers% file MASS/man/rnegbin.Rd % copyright (C) 1994-9 W. N. Venables and B. D. 
Ripley % \name{rnegbin} \alias{rnegbin} \title{ Simulate Negative Binomial Variates } \description{ Function to generate random outcomes from a Negative Binomial distribution, with mean \code{mu} and variance \code{mu + mu^2/theta}. } \usage{ rnegbin(n, mu = n, theta = stop("'theta' must be specified")) } \arguments{ \item{n}{ If a scalar, the number of sample values required. If a vector, \code{length(n)} is the number required and \code{n} is used as the mean vector if \code{mu} is not specified. } \item{mu}{ The vector of means. Short vectors are recycled. } \item{theta}{ Vector of values of the \code{theta} parameter. Short vectors are recycled. }} \value{ Vector of random Negative Binomial variate values. } \section{Side Effects}{ Changes \code{.Random.seed} in the usual way. } \details{ The function uses the representation of the Negative Binomial distribution as a continuous mixture of Poisson distributions with Gamma distributed means. Unlike \code{rnbinom} the index can be arbitrary. } \examples{ # Negative Binomials with means fitted(fm) and theta = 4.5 fm <- glm.nb(Days ~ ., data = quine) dummy <- rnegbin(fitted(fm), theta = 4.5) } \keyword{distribution} MASS/man/epil.Rd0000644000176000001440000000500513577076541013057 0ustar ripleyusers\name{epil} \alias{epil} \title{ Seizure Counts for Epileptics } \description{ Thall and Vail (1990) give a data set on two-week seizure counts for 59 epileptics. The number of seizures was recorded for a baseline period of 8 weeks, and then patients were randomly assigned to a treatment group or a control group. Counts were then recorded for four successive two-week periods. The subject's age is the only covariate. } \usage{ epil } \format{ This data frame has 236 rows and the following 9 columns: \describe{ \item{\code{y}}{ the count for the 2-week period. } \item{\code{trt}}{ treatment, \code{"placebo"} or \code{"progabide"}. } \item{\code{base}}{ the counts in the baseline 8-week period. 
} \item{\code{age}}{ subject's age, in years. } \item{\code{V4}}{ \code{0/1} indicator variable of period 4. } \item{\code{subject}}{ subject number, 1 to 59. } \item{\code{period}}{ period, 1 to 4. } \item{\code{lbase}}{ log-counts for the baseline period, centred to have zero mean. } \item{\code{lage}}{ log-ages, centred to have zero mean. } } } \source{ Thall, P. F. and Vail, S. C. (1990) Some covariance models for longitudinal count data with over-dispersion. \emph{Biometrics} \bold{46}, 657--671. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth Edition. Springer. } \examples{ summary(glm(y ~ lbase*trt + lage + V4, family = poisson, data = epil), cor = FALSE) epil2 <- epil[epil$period == 1, ] epil2["period"] <- rep(0, 59); epil2["y"] <- epil2["base"] epil["time"] <- 1; epil2["time"] <- 4 epil2 <- rbind(epil, epil2) epil2$pred <- unclass(epil2$trt) * (epil2$period > 0) epil2$subject <- factor(epil2$subject) epil3 <- aggregate(epil2, list(epil2$subject, epil2$period > 0), function(x) if(is.numeric(x)) sum(x) else x[1]) epil3$pred <- factor(epil3$pred, labels = c("base", "placebo", "drug")) contrasts(epil3$pred) <- structure(contr.sdif(3), dimnames = list(NULL, c("placebo-base", "drug-placebo"))) ## IGNORE_RDIFF_BEGIN summary(glm(y ~ pred + factor(subject) + offset(log(time)), family = poisson, data = epil3), cor = FALSE) ## IGNORE_RDIFF_END summary(glmmPQL(y ~ lbase*trt + lage + V4, random = ~ 1 | subject, family = poisson, data = epil)) summary(glmmPQL(y ~ pred, random = ~1 | subject, family = poisson, data = epil3)) } \keyword{datasets} MASS/man/pairs.lda.Rd0000644000176000001440000000322011754562034013770 0ustar ripleyusers% file MASS/man/pairs.lda.Rd % copyright (C) 1998-9 W. N. Venables and B. D. Ripley % \name{pairs.lda} \alias{pairs.lda} \title{ Produce Pairwise Scatterplots from an 'lda' Fit } \description{ Pairwise scatterplot of the data on the linear discriminants. 
} \usage{ \method{pairs}{lda}(x, labels = colnames(x), panel = panel.lda, dimen, abbrev = FALSE, \dots, cex=0.7, type = c("std", "trellis")) } \arguments{ \item{x}{ Object of class \code{"lda"}. } \item{labels}{ vector of character strings for labelling the variables. } \item{panel}{ panel function to plot the data in each panel. } \item{dimen}{ The number of linear discriminants to be used for the plot; if this exceeds the number determined by \code{x} the smaller value is used. } \item{abbrev}{ whether the group labels are abbreviated on the plots. If \code{abbrev > 0} this gives \code{minlength} in the call to \code{abbreviate}. } \item{\dots}{ additional arguments for \code{pairs.default}. } \item{cex}{ graphics parameter \code{cex} for labels on plots. } \item{type}{ type of plot. The default is in the style of \code{\link{pairs.default}}; the style \code{"trellis"} uses the Trellis function \code{\link[lattice]{splom}}. }} \details{ This function is a method for the generic function \code{pairs()} for class \code{"lda"}. It can be invoked by calling \code{pairs(x)} for an object \code{x} of the appropriate class, or directly by calling \code{pairs.lda(x)} regardless of the class of the object. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{pairs}} } \keyword{hplot} \keyword{multivariate} MASS/man/oats.Rd0000644000176000001440000000364413577100316013067 0ustar ripleyusers% file MASS/man/oats.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{oats} \alias{oats} \title{ Data from an Oats Field Trial } \description{ The yield of oats from a split-plot field trial using three varieties and four levels of manurial treatment. The experiment was laid out in 6 blocks of 3 main plots, each split into 4 sub-plots. The varieties were applied to the main plots and the manurial treatments to the sub-plots. 
} \usage{ oats } \format{ This data frame contains the following columns: \describe{ \item{\code{B}}{ Blocks, levels I, II, III, IV, V and VI. } \item{\code{V}}{ Varieties, 3 levels. } \item{\code{N}}{ Nitrogen (manurial) treatment, levels 0.0cwt, 0.2cwt, 0.4cwt and 0.6cwt, showing the application in cwt/acre. } \item{\code{Y}}{ Yields in 1/4lbs per sub-plot, each of area 1/80 acre. } } } \source{ Yates, F. (1935) Complex experiments, \emph{Journal of the Royal Statistical Society Suppl.} \bold{2}, 181--247. Also given in Yates, F. (1970) \emph{Experimental design: Selected papers of Frank Yates, C.B.E, F.R.S.} London: Griffin. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \examples{ oats$Nf <- ordered(oats$N, levels = sort(levels(oats$N))) oats.aov <- aov(Y ~ Nf*V + Error(B/V), data = oats, qr = TRUE) ## IGNORE_RDIFF_BEGIN summary(oats.aov) summary(oats.aov, split = list(Nf=list(L=1, Dev=2:3))) ## IGNORE_RDIFF_END par(mfrow = c(1,2), pty = "s") plot(fitted(oats.aov[[4]]), studres(oats.aov[[4]])) abline(h = 0, lty = 2) oats.pr <- proj(oats.aov) qqnorm(oats.pr[[4]][,"Residuals"], ylab = "Stratum 4 residuals") qqline(oats.pr[[4]][,"Residuals"]) par(mfrow = c(1,1), pty = "m") oats.aov2 <- aov(Y ~ N + V + Error(B/V), data = oats, qr = TRUE) model.tables(oats.aov2, type = "means", se = TRUE) } \keyword{datasets} MASS/man/hubers.Rd0000644000176000001440000000177511754562034013420 0ustar ripleyusers% file MASS/man/hubers.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{hubers} \alias{hubers} \title{ Huber Proposal 2 Robust Estimator of Location and/or Scale } \description{ Finds the Huber M-estimator for location with scale specified, scale with location specified, or both if neither is specified. 
} \usage{ hubers(y, k = 1.5, mu, s, initmu = median(y), tol = 1e-06) } \arguments{ \item{y}{ vector y of data values } \item{k}{ Winsorizes at \code{k} standard deviations } \item{mu}{ specified location } \item{s}{ specified scale } \item{initmu}{ initial value of \code{mu} } \item{tol}{ convergence tolerance }} \value{ list of location and scale estimates \item{mu}{ location estimate } \item{s}{ scale estimate }} \references{ Huber, P. J. (1981) \emph{Robust Statistics.} Wiley. Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{huber}} } \examples{ hubers(chem) hubers(chem, mu=3.68) } \keyword{robust} MASS/man/beav1.Rd0000644000176000001440000000454412171733033013114 0ustar ripleyusers% file MASS/man/beav1.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{beav1} \alias{beav1} \title{ Body Temperature Series of Beaver 1 } \description{ Reynolds (1994) describes a small part of a study of the long-term temperature dynamics of beaver \emph{Castor canadensis} in north-central Wisconsin. Body temperature was measured by telemetry every 10 minutes for four females, but data from a one period of less than a day for each of two animals is used there. } \usage{ beav1 } \format{ The \code{beav1} data frame has 114 rows and 4 columns. This data frame contains the following columns: \describe{ \item{\code{day}}{ Day of observation (in days since the beginning of 1990), December 12--13. } \item{\code{time}}{ Time of observation, in the form \code{0330} for 3.30am. } \item{\code{temp}}{ Measured body temperature in degrees Celsius. } \item{\code{activ}}{ Indicator of activity outside the retreat. } } } \note{ The observation at 22:20 is missing. } \source{ P. S. Reynolds (1994) Time-series analyses of beaver body temperatures. Chapter 11 of Lange, N., Ryan, L., Billard, L., Brillinger, D., Conquest, L. and Greenhouse, J. 
eds (1994) \emph{Case Studies in Biometry.} New York: John Wiley and Sons. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{beav2}} } \examples{ beav1 <- within(beav1, hours <- 24*(day-346) + trunc(time/100) + (time\%\%100)/60) plot(beav1$hours, beav1$temp, type="l", xlab="time", ylab="temperature", main="Beaver 1") usr <- par("usr"); usr[3:4] <- c(-0.2, 8); par(usr=usr) lines(beav1$hours, beav1$activ, type="s", lty=2) temp <- ts(c(beav1$temp[1:82], NA, beav1$temp[83:114]), start = 9.5, frequency = 6) activ <- ts(c(beav1$activ[1:82], NA, beav1$activ[83:114]), start = 9.5, frequency = 6) acf(temp[1:53]) acf(temp[1:53], type = "partial") ar(temp[1:53]) act <- c(rep(0, 10), activ) X <- cbind(1, act = act[11:125], act1 = act[10:124], act2 = act[9:123], act3 = act[8:122]) alpha <- 0.80 stemp <- as.vector(temp - alpha*lag(temp, -1)) sX <- X[-1, ] - alpha * X[-115,] beav1.ls <- lm(stemp ~ -1 + sX, na.action = na.omit) summary(beav1.ls, cor = FALSE) rm(temp, activ) } \keyword{datasets} MASS/man/loglm.Rd0000644000176000001440000001360212175703657013240 0ustar ripleyusers% file MASS/man/loglm.Rd % copyright (C) 1994-2013 W. N. Venables and B. D. Ripley % \name{loglm} \alias{loglm} % \alias{anova.loglm} % \alias{print.anova.loglm} % \alias{coef.loglm} % \alias{extractAIC.loglm} % \alias{fitted.loglm} % \alias{print.loglm} % \alias{residuals.loglm} % \alias{update.loglm} \title{ Fit Log-Linear Models by Iterative Proportional Scaling } \description{ This function provides a front-end to the standard function, \code{loglin}, to allow log-linear models to be specified and fitted in a manner similar to that of other fitting functions, such as \code{glm}. } \usage{ loglm(formula, data, subset, na.action, \dots) } \arguments{ \item{formula}{ A linear model formula specifying the log-linear model. 
If the left-hand side is empty, the \code{data} argument is required and must be a (complete) array of frequencies. In this case the variables on the right-hand side may be the names of the \code{dimnames} attribute of the frequency array, or may be the positive integers: 1, 2, 3, \dots used as alternative names for the 1st, 2nd, 3rd, \dots dimension (classifying factor). If the left-hand side is not empty it specifies a vector of frequencies. In this case the data argument, if present, must be a data frame from which the left-hand side vector and the classifying factors on the right-hand side are (preferentially) obtained. The usual abbreviation of a \code{.} to stand for \sQuote{all other variables in the data frame} is allowed. Any non-factors on the right-hand side of the formula are coerced to factor. } \item{data}{ Numeric array or data frame. In the first case it specifies the array of frequencies; in then second it provides the data frame from which the variables occurring in the formula are preferentially obtained in the usual way. This argument may be the result of a call to \code{\link{xtabs}}. } \item{subset}{ Specifies a subset of the rows in the data frame to be used. The default is to take all rows. } \item{na.action}{ Specifies a method for handling missing observations. The default is to fail if missing values are present. } \item{\dots}{ May supply other arguments to the function \code{\link{loglm1}}. }} \value{ An object of class \code{"loglm"} conveying the results of the fitted log-linear model. Methods exist for the generic functions \code{print}, \code{summary}, \code{deviance}, \code{fitted}, \code{coef}, \code{resid}, \code{anova} and \code{update}, which perform the expected tasks. Only log-likelihood ratio tests are allowed using \code{anova}. 
The deviance is simply an alternative name for the log-likelihood ratio statistic for testing the current model within a saturated model, in accordance with standard usage in generalized linear models. } \details{ If the left-hand side of the formula is empty the \code{data} argument supplies the frequency array and the right-hand side of the formula is used to construct the list of fixed faces as required by \code{loglin}. Structural zeros may be specified by giving a \code{start} argument with those entries set to zero, as described in the help information for \code{loglin}. If the left-hand side is not empty, all variables on the right-hand side are regarded as classifying factors and an array of frequencies is constructed. If some cells in the complete array are not specified they are treated as structural zeros. The right-hand side of the formula is again used to construct the list of faces on which the observed and fitted totals must agree, as required by \code{loglin}. Hence terms such as \code{a:b}, \code{a*b} and \code{a/b} are all equivalent. } \section{Warning}{ If structural zeros are present, the calculation of degrees of freedom may not be correct. \code{loglin} itself takes no action to allow for structural zeros. \code{loglm} deducts one degree of freedom for each structural zero, but cannot make allowance for gains in error degrees of freedom due to loss of dimension in the model space. (This would require checking the rank of the model matrix, but since iterative proportional scaling methods are developed largely to avoid constructing the model matrix explicitly, the computation is at least difficult.) When structural zeros (or zero fitted values) are present the estimated coefficients will not be available due to infinite estimates. The deviances will normally continue to be correct, though. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. 
} \seealso{ \code{\link{loglm1}}, \code{\link{loglin}} } \examples{ # The data frames Cars93, minn38 and quine are available # in the MASS package. # Case 1: frequencies specified as an array. sapply(minn38, function(x) length(levels(x))) ## hs phs fol sex f ## 3 4 7 2 0 ##minn38a <- array(0, c(3,4,7,2), lapply(minn38[, -5], levels)) ##minn38a[data.matrix(minn38[,-5])] <- minn38$f ## or more simply minn38a <- xtabs(f ~ ., minn38) fm <- loglm(~ 1 + 2 + 3 + 4, minn38a) # numerals as names. deviance(fm) ## [1] 3711.9 fm1 <- update(fm, .~.^2) fm2 <- update(fm, .~.^3, print = TRUE) ## 5 iterations: deviation 0.075 anova(fm, fm1, fm2) # Case 1. An array generated with xtabs. loglm(~ Type + Origin, xtabs(~ Type + Origin, Cars93)) # Case 2. Frequencies given as a vector in a data frame names(quine) ## [1] "Eth" "Sex" "Age" "Lrn" "Days" fm <- loglm(Days ~ .^2, quine) gm <- glm(Days ~ .^2, poisson, quine) # check glm. c(deviance(fm), deviance(gm)) # deviances agree ## [1] 1368.7 1368.7 c(fm$df, gm$df) # resid df do not! c(fm$df, gm$df.residual) # resid df do not! ## [1] 127 128 # The loglm residual degrees of freedom is wrong because of # a non-detectable redundancy in the model matrix. } \keyword{category} \keyword{models} MASS/man/snails.Rd0000644000176000001440000000302111754562034013403 0ustar ripleyusers% file MASS/man/snails.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{snails} \alias{snails} \title{ Snail Mortality Data } \description{ Groups of 20 snails were held for periods of 1, 2, 3 or 4 weeks in carefully controlled conditions of temperature and relative humidity. There were two species of snail, A and B, and the experiment was designed as a 4 by 3 by 4 by 2 completely randomized design. At the end of the exposure time the snails were tested to see if they had survived; the process itself is fatal for the animals. 
The object of the exercise was to model the probability of survival in terms of the stimulus variables, and in particular to test for differences between species. The data are unusual in that in most cases fatalities during the experiment were fairly small. } \usage{ snails } \format{ The data frame contains the following components: \describe{ \item{\code{Species}}{ snail species A (\code{1}) or B (\code{2}). } \item{\code{Exposure}}{ exposure in weeks. } \item{\code{Rel.Hum}}{ relative humidity (4 levels). } \item{\code{Temp}}{ temperature, in degrees Celsius (3 levels). } \item{\code{Deaths}}{ number of deaths. } \item{\code{N}}{ number of snails exposed. } } } \source{ Zoology Department, The University of Adelaide. } \references{ Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. } \keyword{datasets} MASS/man/plot.profile.Rd0000644000176000001440000000400011754562034014525 0ustar ripleyusers% file MASS/man/plot.profile.Rd % copyright (C) 1999-2008 W. N. Venables and B. D. Ripley % \name{plot.profile} \alias{plot.profile} \alias{pairs.profile} \title{Plotting Functions for 'profile' Objects} \description{ \code{\link{plot}} and \code{\link{pairs}} methods for objects of class \code{"profile"}. } \usage{ \method{plot}{profile}(x, ...) \method{pairs}{profile}(x, colours = 2:3, ...) } \arguments{ \item{x}{an object inheriting from class \code{"profile"}.} \item{colours}{Colours to be used for the mean curves conditional on \code{x} and \code{y} respectively.} \item{\dots}{arguments passed to or from other methods.} } \details{ This is the main \code{plot} method for objects created by \code{\link{profile.glm}}. It can also be called on objects created by \code{\link{profile.nls}}, but they have a specific method, \code{\link{plot.profile.nls}}. 
The \code{pairs} method shows, for each pair of parameters x and y, two curves intersecting at the maximum likelihood estimate, which give the loci of the points at which the tangents to the contours of the bivariate profile likelihood become vertical and horizontal, respectively. In the case of an exactly bivariate normal profile likelihood, these two curves would be straight lines giving the conditional means of y|x and x|y, and the contours would be exactly elliptical. } \author{ Originally, D. M. Bates and W. N. Venables. (For S in 1996.) } \seealso{ \code{\link{profile.glm}}, \code{\link{profile.nls}}. } \examples{ ## see ?profile.glm for an example using glm fits. ## a version of example(profile.nls) from R >= 2.8.0 fm1 <- nls(demand ~ SSasympOrig(Time, A, lrc), data = BOD) pr1 <- profile(fm1, alpha = 0.1) MASS:::plot.profile(pr1) pairs(pr1) # a little odd since the parameters are highly correlated ## an example from ?nls x <- -(1:100)/10 y <- 100 + 10 * exp(x / 2) + rnorm(x)/10 nlmod <- nls(y ~ Const + A * exp(B * x), start=list(Const=100, A=10, B=1)) pairs(profile(nlmod)) } \keyword{models} \keyword{hplot} MASS/man/road.Rd0000644000176000001440000000156311754562034013050 0ustar ripleyusers% file MASS/man/road.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{road} \alias{road} \title{ Road Accident Deaths in US States } \description{ A data frame with the annual deaths in road accidents for half the US states. } \usage{ road } \format{ Columns are: \describe{ \item{\code{state}}{ name. } \item{\code{deaths}}{ number of deaths. } \item{\code{drivers}}{ number of drivers (in 10,000s). } \item{\code{popden}}{ population density in people per square mile. } \item{\code{rural}}{ length of rural roads, in 1000s of miles. } \item{\code{temp}}{ average daily maximum temperature in January. } \item{\code{fuel}}{ fuel consumption in 10,000,000 US gallons per year. } } } \source{ Imperial College, London M.Sc. 
exercise } \keyword{datasets} MASS/man/corresp.Rd0000644000176000001440000000614412463405616013600 0ustar ripleyusers% file MASS/man/corresp.Rd % copyright (C) 1994-2015 W. N. Venables and B. D. Ripley % \name{corresp} \alias{corresp} \alias{corresp.xtabs} \alias{corresp.data.frame} \alias{corresp.default} \alias{corresp.factor} \alias{corresp.formula} \alias{corresp.matrix} % \alias{biplot.correspondence} % \alias{plot.correspondence} % \alias{print.correspondence} \title{ Simple Correspondence Analysis } \description{ Find the principal canonical correlation and corresponding row- and column-scores from a correspondence analysis of a two-way contingency table. } \usage{ corresp(x, \dots) \method{corresp}{matrix}(x, nf = 1, \dots) \method{corresp}{factor}(x, y, \dots) \method{corresp}{data.frame}(x, \dots) \method{corresp}{xtabs}(x, \dots) \method{corresp}{formula}(formula, data, \dots) } \arguments{ \item{x, formula}{ The function is generic, accepting various forms of the principal argument for specifying a two-way frequency table. Currently accepted forms are matrices, data frames (coerced to frequency tables), objects of class \code{"\link{xtabs}"} and formulae of the form \code{~ F1 + F2}, where \code{F1} and \code{F2} are factors. } \item{nf}{ The number of factors to be computed. Note that although 1 is the most usual, one school of thought takes the first two singular vectors for a sort of biplot. } \item{y}{a second factor for a cross-classification.} \item{data}{a data frame against which to preferentially resolve variables in the formula.} \item{\dots}{ If the principal argument is a formula, a data frame may be specified as well from which variables in the formula are preferentially satisfied. } } \value{ An list object of class \code{"correspondence"} for which \code{print}, \code{plot} and \code{biplot} methods are supplied. The main components are the canonical correlation(s) and the row and column scores. } \details{ See Venables & Ripley (2002). 
The \code{plot} method produces a graphical representation of the table if \code{nf=1}, with the \emph{areas} of circles representing the numbers of points. If \code{nf} is two or more the \code{biplot} method is called, which plots the second and third columns of the matrices \code{A = Dr^(-1/2) U L} and \code{B = Dc^(-1/2) V L} where the singular value decomposition is \code{U L V}. Thus the x-axis is the canonical correlation times the row and column scores. Although this is called a biplot, it does \emph{not} have any useful inner product relationship between the row and column scores. Think of this as an equally-scaled plot with two unrelated sets of labels. The origin is marked on the plot with a cross. (For other versions of this plot see the book.) } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. Gower, J. C. and Hand, D. J. (1996) \emph{Biplots.} Chapman & Hall. } \seealso{ \code{\link{svd}}, \code{\link{princomp}}. } \examples{ (ct <- corresp(~ Age + Eth, data = quine)) plot(ct) corresp(caith) biplot(corresp(caith, nf = 2)) } \keyword{category} \keyword{multivariate} MASS/man/kde2d.Rd0000644000176000001440000000450511754562034013113 0ustar ripleyusers% file MASS/man/kde2d.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{kde2d} \alias{kde2d} \title{ Two-Dimensional Kernel Density Estimation } \description{ Two-dimensional kernel density estimation with an axis-aligned bivariate normal kernel, evaluated on a square grid. } \usage{ kde2d(x, y, h, n = 25, lims = c(range(x), range(y))) } \arguments{ \item{x}{ x coordinate of data } \item{y}{ y coordinate of data } \item{h}{ vector of bandwidths for x and y directions. Defaults to normal reference bandwidth (see \code{\link{bandwidth.nrd}}). A scalar value will be taken to apply to both directions. } \item{n}{ Number of grid points in each direction. Can be scalar or a length-2 integer vector. 
} \item{lims}{ The limits of the rectangle covered by the grid as \code{c(xl, xu, yl, yu)}. }} \value{ A list of three components. \item{x, y}{ The x and y coordinates of the grid points, vectors of length \code{n}. } \item{z}{ An \code{n[1]} by \code{n[2]} matrix of the estimated density: rows correspond to the value of \code{x}, columns to the value of \code{y}. }} \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \examples{ attach(geyser) plot(duration, waiting, xlim = c(0.5,6), ylim = c(40,100)) f1 <- kde2d(duration, waiting, n = 50, lims = c(0.5, 6, 40, 100)) image(f1, zlim = c(0, 0.05)) f2 <- kde2d(duration, waiting, n = 50, lims = c(0.5, 6, 40, 100), h = c(width.SJ(duration), width.SJ(waiting)) ) image(f2, zlim = c(0, 0.05)) persp(f2, phi = 30, theta = 20, d = 5) plot(duration[-272], duration[-1], xlim = c(0.5, 6), ylim = c(1, 6),xlab = "previous duration", ylab = "duration") f1 <- kde2d(duration[-272], duration[-1], h = rep(1.5, 2), n = 50, lims = c(0.5, 6, 0.5, 6)) contour(f1, xlab = "previous duration", ylab = "duration", levels = c(0.05, 0.1, 0.2, 0.4) ) f1 <- kde2d(duration[-272], duration[-1], h = rep(0.6, 2), n = 50, lims = c(0.5, 6, 0.5, 6)) contour(f1, xlab = "previous duration", ylab = "duration", levels = c(0.05, 0.1, 0.2, 0.4) ) f1 <- kde2d(duration[-272], duration[-1], h = rep(0.4, 2), n = 50, lims = c(0.5, 6, 0.5, 6)) contour(f1, xlab = "previous duration", ylab = "duration", levels = c(0.05, 0.1, 0.2, 0.4) ) detach("geyser") } \keyword{dplot} MASS/man/anova.negbin.Rd0000644000176000001440000000412111754562034014461 0ustar ripleyusers% file MASS/man/anova.negbin.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{anova.negbin} \alias{anova.negbin} \title{ Likelihood Ratio Tests for Negative Binomial GLMs } \description{ Method function to perform sequential likelihood ratio tests for Negative Binomial generalized linear models. 
} \usage{ \method{anova}{negbin}(object, \dots, test = "Chisq") } \arguments{ \item{object}{ Fitted model object of class \code{"negbin"}, inheriting from classes \code{"glm"} and \code{"lm"}, specifying a Negative Binomial fitted GLM. Typically the output of \code{\link{glm.nb}()}. } \item{\dots}{ Zero or more additional fitted model objects of class \code{"negbin"}. They should form a nested sequence of models, but need not be specified in any particular order. } \item{test}{ Argument to match the \code{test} argument of \code{\link{anova.glm}}. Ignored (with a warning if changed) if a sequence of two or more Negative Binomial fitted model objects is specified, but possibly used if only one object is specified. }} \note{ If only one fitted model object is specified, a sequential analysis of deviance table is given for the fitted model. The \code{theta} parameter is kept fixed. If more than one fitted model object is specified they must all be of class \code{"negbin"} and likelihood ratio tests are done of each model within the next. In this case \code{theta} is assumed to have been re-estimated for each model. } \details{ This function is a method for the generic function \code{anova()} for class \code{"negbin"}. It can be invoked by calling \code{anova(x)} for an object \code{x} of the appropriate class, or directly by calling \code{anova.negbin(x)} regardless of the class of the object. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{glm.nb}}, \code{\link{negative.binomial}}, \code{\link{summary.negbin}} } \examples{ m1 <- glm.nb(Days ~ Eth*Age*Lrn*Sex, quine, link = log) m2 <- update(m1, . ~ . - Eth:Age:Lrn:Sex) anova(m2, m1) anova(m2) } \keyword{regression} MASS/man/predict.mca.Rd0000644000176000001440000000227211754562034014312 0ustar ripleyusers% file MASS/man/predict.mca.Rd % copyright (C) 1994-9 W. N. Venables and B. D. 
Ripley % \name{predict.mca} \alias{predict.mca} \title{ Predict Method for Class 'mca' } \description{ Used to compute coordinates for additional rows or additional factors in a multiple correspondence analysis. } \usage{ \method{predict}{mca}(object, newdata, type = c("row", "factor"), \dots) } \arguments{ \item{object}{ An object of class \code{"mca"}, usually the result of a call to \code{mca}. } \item{newdata}{ A data frame containing \emph{either} additional rows of the factors used to fit \code{object} \emph{or} additional factors for the cases used in the original fit. } \item{type}{ Are predictions required for further rows or for new factors? } \item{\dots}{ Additional arguments from \code{predict}: unused. }} \value{ If \code{type = "row"}, the coordinates for the additional rows. If \code{type = "factor"}, the coordinates of the column vertices for the levels of the new factors. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{mca}}, \code{\link{plot.mca}} } \keyword{category} \keyword{multivariate} MASS/man/accdeaths.Rd0000644000176000001440000000123311754562034014034 0ustar ripleyusers% file MASS/man/accdeaths.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{accdeaths} \alias{accdeaths} \title{ Accidental Deaths in the US 1973-1978 } \description{ A regular time series giving the monthly totals of accidental deaths in the USA. } \usage{ accdeaths } \details{ The values for first six months of 1979 (p. 326) were \code{7798 7406 8363 8460 9217 9316}. } \source{ P. J. Brockwell and R. A. Davis (1991) \emph{Time Series: Theory and Methods.} Springer, New York. } \references{ Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. } \keyword{datasets} MASS/man/cabbages.Rd0000644000176000001440000000215711754562034013652 0ustar ripleyusers% file MASS/man/cabbages.Rd % copyright (C) 1999 W. 
N. Venables and B. D. Ripley % \name{cabbages} \alias{cabbages} \title{ Data from a cabbage field trial } \description{ The \code{cabbages} data set has 60 observations and 4 variables } \usage{ cabbages } \format{ This data frame contains the following columns: \describe{ \item{\code{Cult}}{ Factor giving the cultivar of the cabbage, two levels: \code{c39} and \code{c52}. } \item{\code{Date}}{ Factor specifying one of three planting dates: \code{d16}, \code{d20} or \code{d21}. } \item{\code{HeadWt}}{ Weight of the cabbage head, presumably in kg. } \item{\code{VitC}}{ Ascorbic acid content, in undefined units. } } } \source{ Rawlings, J. O. (1988) \emph{Applied Regression Analysis: A Research Tool.} Wadsworth and Brooks/Cole. Example 8.4, page 219. (Rawlings cites the original source as the files of the late Dr Gertrude M Cox.) } \references{ Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. } \keyword{datasets} MASS/man/Boston.Rd0000644000176000001440000000355111754562034013366 0ustar ripleyusers% file MASS/man/Boston.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{Boston} \alias{Boston} \title{ Housing Values in Suburbs of Boston } \description{ The \code{Boston} data frame has 506 rows and 14 columns. } \usage{ Boston } \format{ This data frame contains the following columns: \describe{ \item{\code{crim}}{ per capita crime rate by town. } \item{\code{zn}}{ proportion of residential land zoned for lots over 25,000 sq.ft. } \item{\code{indus}}{ proportion of non-retail business acres per town. } \item{\code{chas}}{ Charles River dummy variable (= 1 if tract bounds river; 0 otherwise). } \item{\code{nox}}{ nitrogen oxides concentration (parts per 10 million). } \item{\code{rm}}{ average number of rooms per dwelling. } \item{\code{age}}{ proportion of owner-occupied units built prior to 1940. } \item{\code{dis}}{ weighted mean of distances to five Boston employment centres. 
} \item{\code{rad}}{ index of accessibility to radial highways. } \item{\code{tax}}{ full-value property-tax rate per \$10,000. } \item{\code{ptratio}}{ pupil-teacher ratio by town. } \item{\code{black}}{ \eqn{1000(Bk - 0.63)^2} where \eqn{Bk} is the proportion of blacks by town. } \item{\code{lstat}}{ lower status of the population (percent). } \item{\code{medv}}{ median value of owner-occupied homes in \$1000s. } } } \source{ Harrison, D. and Rubinfeld, D.L. (1978) Hedonic prices and the demand for clean air. \emph{J. Environ. Economics and Management} \bold{5}, 81--102. Belsley D.A., Kuh, E. and Welsch, R.E. (1980) \emph{Regression Diagnostics. Identifying Influential Data and Sources of Collinearity.} New York: Wiley. } \keyword{datasets} MASS/man/bacteria.Rd0000644000176000001440000000457712463405616013675 0ustar ripleyusers\name{bacteria} \alias{bacteria} \title{ Presence of Bacteria after Drug Treatments } \description{ Tests of the presence of the bacteria \emph{H. influenzae} in children with otitis media in the Northern Territory of Australia. } \usage{ bacteria } \format{ This data frame has 220 rows and the following columns: \describe{ \item{y}{presence or absence: a factor with levels \code{n} and \code{y}.} \item{ap}{active/placebo: a factor with levels \code{a} and \code{p}.} \item{hilo}{hi/low compliance: a factor with levels \code{hi} and \code{lo}.} \item{week}{numeric: week of test.} \item{ID}{subject ID: a factor.} \item{trt}{a factor with levels \code{placebo}, \code{drug} and \code{drug+}, a re-coding of \code{ap} and \code{hilo}.} } } \details{ Dr A. Leach tested the effects of a drug on 50 children with a history of otitis media in the Northern Territory of Australia. The children were randomized to the drug or a placebo, and also to receive active encouragement to comply with taking the drug. The presence of \emph{H. 
influenzae} was checked at weeks 0, 2, 4, 6 and 11: 30 of the checks were missing and are not included in this data frame. } \source{ Dr Amanda Leach \emph{via} Mr James McBroom. } \references{ Menzies School of Health Research 1999--2000 Annual Report. p.20. \url{http://www.menzies.edu.au/icms_docs/172302_2000_Annual_report.pdf}. Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \examples{ contrasts(bacteria$trt) <- structure(contr.sdif(3), dimnames = list(NULL, c("drug", "encourage"))) ## fixed effects analyses summary(glm(y ~ trt * week, binomial, data = bacteria)) summary(glm(y ~ trt + week, binomial, data = bacteria)) summary(glm(y ~ trt + I(week > 2), binomial, data = bacteria)) # conditional random-effects analysis library(survival) bacteria$Time <- rep(1, nrow(bacteria)) coxph(Surv(Time, unclass(y)) ~ week + strata(ID), data = bacteria, method = "exact") coxph(Surv(Time, unclass(y)) ~ factor(week) + strata(ID), data = bacteria, method = "exact") coxph(Surv(Time, unclass(y)) ~ I(week > 2) + strata(ID), data = bacteria, method = "exact") # PQL glmm analysis library(nlme) summary(glmmPQL(y ~ trt + I(week > 2), random = ~ 1 | ID, family = binomial, data = bacteria)) } \keyword{datasets} MASS/man/rms.curv.Rd0000644000176000001440000000403611754562034013700 0ustar ripleyusers% file MASS/man/rms.curv.Rd % copyright (C) 1994-2009 W. N. Venables and B. D. Ripley % \name{rms.curv} \alias{rms.curv} \alias{print.rms.curv} \title{ Relative Curvature Measures for Non-Linear Regression } \description{ Calculates the root mean square parameter effects and intrinsic relative curvatures, \eqn{c^\theta}{c^theta} and \eqn{c^\iota}{c^iota}, for a fitted nonlinear regression, as defined in Bates & Watts, section 7.3, p. 253ff } \usage{ rms.curv(obj) } \arguments{ \item{obj}{ Fitted model object of class \code{"nls"}. The model must be fitted using the default algorithm. 
} } \value{ A list of class \code{rms.curv} with components \code{pc} and \code{ic} for parameter effects and intrinsic relative curvatures multiplied by sqrt(F), \code{ct} and \code{ci} for \eqn{c^\theta} and \eqn{c^\iota} (unmultiplied), and \code{C} the C-array as used in section 7.3.1 of Bates & Watts. } \details{ The method of section 7.3.1 of Bates & Watts is implemented. The function \code{deriv3} should be used to generate a model function with first derivative (gradient) matrix and second derivative (Hessian) array attributes. This function should then be used to fit the nonlinear regression model. A print method, \code{print.rms.curv}, prints the \code{pc} and \code{ic} components only, suitably annotated. If either \code{pc} or \code{ic} exceeds some threshold (0.3 has been suggested) the curvature is unacceptably high for the planar assumption. } \references{ Bates, D. M. and Watts, D. G. (1988) \emph{Nonlinear Regression Analysis and its Applications.} Wiley, New York. } \seealso{ \code{\link{deriv3}} } \examples{ # The treated sample from the Puromycin data mmcurve <- deriv3(~ Vm * conc/(K + conc), c("Vm", "K"), function(Vm, K, conc) NULL) Treated <- Puromycin[Puromycin$state == "treated", ] (Purfit1 <- nls(rate ~ mmcurve(Vm, K, conc), data = Treated, start = list(Vm=200, K=0.1))) rms.curv(Purfit1) ##Parameter effects: c^theta x sqrt(F) = 0.2121 ## Intrinsic: c^iota x sqrt(F) = 0.092 } \keyword{nonlinear} MASS/man/plot.mca.Rd0000644000176000001440000000157111754562034013637 0ustar ripleyusers% file MASS/man/plot.mca.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{plot.mca} \alias{plot.mca} \title{ Plot Method for Objects of Class 'mca' } \description{ Plot a multiple correspondence analysis. } \usage{ \method{plot}{mca}(x, rows = TRUE, col, cex = par("cex"), \dots) } \arguments{ \item{x}{ An object of class \code{"mca"}. } \item{rows}{ Should the coordinates for the rows be plotted, or just the vertices for the levels? 
} \item{col, cex}{ The colours and \code{cex} to be used for the row points and level vertices respectively. } \item{\dots}{ Additional parameters to \code{plot}. }} \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{mca}}, \code{\link{predict.mca}} } \examples{ plot(mca(farms, abbrev = TRUE)) } \keyword{hplot} \keyword{multivariate} MASS/man/shoes.Rd0000644000176000001440000000106211754562034013236 0ustar ripleyusers% file MASS/man/shoes.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{shoes} \alias{shoes} \title{ Shoe wear data of Box, Hunter and Hunter } \description{ A list of two vectors, giving the wear of shoes of materials A and B for one foot each of ten boys. } \usage{ shoes } \source{ G. E. P. Box, W. G. Hunter and J. S. Hunter (1978) \emph{Statistics for Experimenters.} Wiley, p. 100 } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/hills.Rd0000644000176000001440000000174311754562034013236 0ustar ripleyusers% file MASS/man/hills.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{hills} \alias{hills} \title{ Record Times in Scottish Hill Races } \description{ The record times in 1984 for 35 Scottish hill races. } \usage{ hills } \format{ The components are: \describe{ \item{\code{dist}}{ distance in miles (on the map). } \item{\code{climb}}{ total height gained during the route, in feet. } \item{\code{time}}{ record time in minutes. } } } \source{ A.C. Atkinson (1986) Comment: Aspects of diagnostic regression analysis. \emph{Statistical Science} \bold{1}, 397--402. [A.C. Atkinson (1988) Transformations unmasked. \emph{Technometrics} \bold{30}, 311--318 \dQuote{corrects} the time for Knock Hill from 78.65 to 18.65. It is unclear if this is based on the original records.] } \references{ Venables, W. N. and Ripley, B. D. 
(2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/con2tr.Rd0000644000176000001440000000120411754562034013322 0ustar ripleyusers\name{con2tr} \alias{con2tr} \title{ Convert Lists to Data Frames for use by lattice } \description{ Convert lists to data frames for use by lattice. } \usage{ con2tr(obj) } \arguments{ \item{obj}{ A list of components \code{x}, \code{y} and \code{z} as passed to \code{contour}. } } \value{ A data frame suitable for passing to lattice (formerly trellis) functions. } \details{ \code{con2tr} repeats the \code{x} and \code{y} components suitably to match the vector \code{z}. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{misc} MASS/man/summary.loglm.Rd0000644000176000001440000000276311754562034014734 0ustar ripleyusers% file MASS/man/summary.loglm.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{summary.loglm} \alias{summary.loglm} \alias{print.summary.loglm} \title{ Summary Method Function for Objects of Class 'loglm' } \description{ Returns a summary list for log-linear models fitted by iterative proportional scaling using \code{loglm}. } \usage{ \method{summary}{loglm}(object, fitted = FALSE, \dots) } \arguments{ \item{object}{ a fitted loglm model object. } \item{fitted}{ if \code{TRUE} return observed and expected frequencies in the result. Using \code{fitted = TRUE} may necessitate re-fitting the object. } \item{\dots}{ arguments to be passed to or from other methods. }} \value{ a list is returned for use by \code{print.summary.loglm}. This has components \item{formula}{ the formula used to produce \code{object} } \item{tests}{ the table of test statistics (likelihood ratio, Pearson) for the fit. } \item{oe}{ if \code{fitted = TRUE}, an array of the observed and expected frequencies, otherwise \code{NULL}. 
}} \details{ This function is a method for the generic function \code{summary()} for class \code{"loglm"}. It can be invoked by calling \code{summary(x)} for an object \code{x} of the appropriate class, or directly by calling \code{summary.loglm(x)} regardless of the class of the object. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{loglm}}, \code{\link{summary}} } \keyword{models} MASS/man/addterm.Rd0000644000176000001440000000645111754562034013544 0ustar ripleyusers% file MASS/man/addterm.Rd % copyright (C) 1998-9 W. N. Venables and B. D. Ripley % \name{addterm} \alias{addterm} \alias{addterm.default} \alias{addterm.glm} \alias{addterm.lm} %\alias{addterm.mlm} %\alias{addterm.negbin} %\alias{addterm.survreg} \title{ Try All One-Term Additions to a Model } \description{ Try fitting all models that differ from the current model by adding a single term from those supplied, maintaining marginality. This function is generic; there exist methods for classes \code{lm} and \code{glm} and the default method will work for many other classes. } \usage{ addterm(object, \dots) \method{addterm}{default}(object, scope, scale = 0, test = c("none", "Chisq"), k = 2, sorted = FALSE, trace = FALSE, \dots) \method{addterm}{lm}(object, scope, scale = 0, test = c("none", "Chisq", "F"), k = 2, sorted = FALSE, \dots) \method{addterm}{glm}(object, scope, scale = 0, test = c("none", "Chisq", "F"), k = 2, sorted = FALSE, trace = FALSE, \dots) } \arguments{ \item{object}{ An object fitted by some model-fitting function. } \item{scope}{ a formula specifying a maximal model which should include the current one. All additional terms in the maximal model with all marginal terms in the original model are tried. } \item{scale}{ used in the definition of the AIC statistic for selecting the models, currently only for \code{lm}, \code{aov} and \code{glm} models. 
Specifying \code{scale} asserts that the residual standard error or dispersion is known. } \item{test}{ should the results include a test statistic relative to the original model? The F test is only appropriate for \code{lm} and \code{aov} models, and perhaps for some over-dispersed \code{glm} models. The Chisq test can be an exact test (\code{lm} models with known scale) or a likelihood-ratio test depending on the method. } \item{k}{ the multiple of the number of degrees of freedom used for the penalty. Only \code{k=2} gives the genuine AIC: \code{k = log(n)} is sometimes referred to as BIC or SBC. } \item{sorted}{ should the results be sorted on the value of AIC? } \item{trace}{ if \code{TRUE} additional information may be given on the fits as they are tried. } \item{\dots}{ arguments passed to or from other methods. }} \value{ A table of class \code{"anova"} containing at least columns for the change in degrees of freedom and AIC (or Cp) for the models. Some methods will give further information, for example sums of squares, deviances, log-likelihoods and test statistics. } \details{ The definition of AIC is only up to an additive constant: when appropriate (\code{lm} models with specified scale) the constant is taken to be that used in Mallows' Cp statistic and the results are labelled accordingly. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{dropterm}}, \code{\link{stepAIC}} } \examples{ quine.hi <- aov(log(Days + 2.5) ~ .^4, quine) quine.lo <- aov(log(Days+2.5) ~ 1, quine) addterm(quine.lo, quine.hi, test="F") house.glm0 <- glm(Freq ~ Infl*Type*Cont + Sat, family=poisson, data=housing) addterm(house.glm0, ~. + Sat:(Infl+Type+Cont), test="Chisq") house.glm1 <- update(house.glm0, . ~ . + Sat*(Infl+Type+Cont)) addterm(house.glm1, ~. 
+ Sat:(Infl+Type+Cont)^2, test = "Chisq") } \keyword{models} MASS/man/truehist.Rd0000644000176000001440000000461311754562034013771 0ustar ripleyusers% file MASS/man/truehist.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{truehist} \alias{truehist} \title{ Plot a Histogram } \description{ Creates a histogram on the current graphics device. } \usage{ truehist(data, nbins = "Scott", h, x0 = -h/1000, breaks, prob = TRUE, xlim = range(breaks), ymax = max(est), col = "cyan", xlab = deparse(substitute(data)), bty = "n", \dots) } \arguments{ \item{data}{ numeric vector of data for histogram. Missing values (\code{NA}s) are allowed and omitted. } \item{nbins}{ The suggested number of bins. Either a positive integer, or a character string naming a rule: \code{"Scott"} or \code{"Freedman-Diaconis"} or \code{"FD"}. (Case is ignored.) } \item{h}{ The bin width, a strictly positive number (takes precedence over \code{nbins}). } \item{x0}{ Shift for the bins - the breaks are at \code{x0 + h * (\dots, -1, 0, 1, \dots)} } \item{breaks}{ The set of breakpoints to be used. (Usually omitted, takes precedence over \code{h} and \code{nbins}). } \item{prob}{ If true (the default) plot a true histogram. The vertical axis has a \emph{relative frequency density} scale, so the product of the dimensions of any panel gives the relative frequency. Hence the total area under the histogram is 1 and it is directly comparable with most other estimates of the probability density function. If false plot the counts in the bins. } \item{xlim}{ The limits for the x-axis. } \item{ymax}{ The upper limit for the y-axis. } \item{col}{ The colour for the bar fill: the default is colour 5 in the default \R palette. } \item{xlab}{ label for the plot x-axis. By default, this will be the name of \code{data}. } \item{bty}{ The box type for the plot - defaults to none. } \item{\dots}{ additional arguments to \code{\link{rect}} or \code{\link{plot}}. 
}} \section{Side Effects}{ A histogram is plotted on the current device. } \details{ This plots a true histogram, a density estimate of total area 1. If \code{breaks} is specified, those breakpoints are used. Otherwise if \code{h} is specified, a regular grid of bins is used with width \code{h}. If neither \code{breaks} nor \code{h} is specified, \code{nbins} is used to select a suitable \code{h}. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{hist}} } \keyword{hplot} \keyword{dplot} MASS/man/synth.tr.Rd0000644000176000001440000000163411754562034013713 0ustar ripleyusers% file MASS/man/synth.tr.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{synth.tr} \alias{synth.tr} \alias{synth.te} \title{ Synthetic Classification Problem } \description{ The \code{synth.tr} data frame has 250 rows and 3 columns. The \code{synth.te} data frame has 100 rows and 3 columns. It is intended that \code{synth.tr} be used for training and \code{synth.te} for testing. } \usage{ synth.tr synth.te } \format{ These data frames contain the following columns: \describe{ \item{\code{xs}}{ x-coordinate } \item{\code{ys}}{ y-coordinate } \item{\code{yc}}{ class, coded as 0 or 1. }}} \source{ Ripley, B.D. (1994) Neural networks and related methods for classification (with discussion). \emph{Journal of the Royal Statistical Society series B} \bold{56}, 409--456. Ripley, B.D. (1996) \emph{Pattern Recognition and Neural Networks.} Cambridge: Cambridge University Press. } \keyword{datasets} MASS/man/cement.Rd0000644000176000001440000000204311754562034013370 0ustar ripleyusers% file MASS/man/cement.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{cement} \alias{cement} \title{ Heat Evolved by Setting Cements } \description{ Experiment on the heat evolved in the setting of each of 13 cements. } \usage{ cement } \details{ Thirteen samples of Portland cement were set. 
For each sample, the percentages of the four main chemical ingredients were accurately measured. While the cement was setting the amount of heat evolved was also measured. } \format{ \describe{ \item{\code{x1, x2, x3, x4}}{ Proportions (\%) of active ingredients. } \item{\code{y}}{ heat evolved in cals/gm. } } } \source{ Woods, H., Steinour, H.H. and Starke, H.R. (1932) Effect of composition of Portland cement on heat evolved during hardening. \emph{Industrial Engineering and Chemistry}, \bold{24}, 1207--1214. } \references{ Hald, A. (1957) \emph{Statistical Theory with Engineering Applications.} Wiley, New York. } \examples{ lm(y ~ x1 + x2 + x3 + x4, cement) } \keyword{datasets} MASS/man/Melanoma.Rd0000644000176000001440000000202611754562034013647 0ustar ripleyusers% file MASS/man/Melanoma.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{Melanoma} \alias{Melanoma} \title{ Survival from Malignant Melanoma } \description{ The \code{Melanoma} data frame has data on 205 patients in Denmark with malignant melanoma. } \usage{ Melanoma } \format{ This data frame contains the following columns: \describe{ \item{\code{time}}{ survival time in days, possibly censored. } \item{\code{status}}{ \code{1} died from melanoma, \code{2} alive, \code{3} dead from other causes. } \item{\code{sex}}{ \code{1} = male, \code{0} = female. } \item{\code{age}}{ age in years. } \item{\code{year}}{ of operation. } \item{\code{thickness}}{ tumour thickness in mm. } \item{\code{ulcer}}{ \code{1} = presence, \code{0} = absence. } } } \source{ P. K. Andersen, O. Borgan, R. D. Gill and N. Keiding (1993) \emph{Statistical Models based on Counting Processes.} Springer. } \keyword{datasets} MASS/man/sammon.Rd0000644000176000001440000000535011754562034013413 0ustar ripleyusers% file MASS/man/sammon.Rd % copyright (C) 1994-2003 W. N. Venables and B. D. 
Ripley % \name{sammon} \alias{sammon} \title{ Sammon's Non-Linear Mapping } \description{ One form of non-metric multidimensional scaling. } \usage{ sammon(d, y = cmdscale(d, k), k = 2, niter = 100, trace = TRUE, magic = 0.2, tol = 1e-4) } \arguments{ \item{d}{ distance structure of the form returned by \code{dist}, or a full, symmetric matrix. Data are assumed to be dissimilarities or relative distances, but must be positive except for self-distance. This can contain missing values. } \item{y}{ An initial configuration. If none is supplied, \code{cmdscale} is used to provide the classical solution. (If there are missing values in \code{d}, an initial configuration must be provided.) This must not have duplicates. } \item{k}{ The dimension of the configuration. } \item{niter}{ The maximum number of iterations. } \item{trace}{ Logical for tracing optimization. Default \code{TRUE}. } \item{magic}{ initial value of the step size constant in diagonal Newton method. } \item{tol}{ Tolerance for stopping, in units of stress. }} \value{ Two components: \item{points}{ A two-column vector of the fitted configuration. } \item{stress}{ The final stress achieved. }} \section{Side Effects}{ If trace is true, the initial stress and the current stress are printed out every 10 iterations. } \details{ This chooses a two-dimensional configuration to minimize the stress, the sum of squared differences between the input distances and those of the configuration, weighted by the distances, the whole sum being divided by the sum of input distances to make the stress scale-free. An iterative algorithm is used, which will usually converge in around 50 iterations. As this is necessarily an \eqn{O(n^2)} calculation, it is slow for large datasets. Further, since the configuration is only determined up to rotations and reflections (by convention the centroid is at the origin), the result can vary considerably from machine to machine. 
In this release the algorithm has been modified by adding a step-length search (\code{magic}) to ensure that it always goes downhill. } \references{ Sammon, J. W. (1969) A non-linear mapping for data structure analysis. \emph{IEEE Trans. Comput.}, \bold{C-18} 401--409. Ripley, B. D. (1996) \emph{Pattern Recognition and Neural Networks}. Cambridge University Press. Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{cmdscale}}, \code{\link{isoMDS}} } \examples{ swiss.x <- as.matrix(swiss[, -1]) swiss.sam <- sammon(dist(swiss.x)) plot(swiss.sam$points, type = "n") text(swiss.sam$points, labels = as.character(1:nrow(swiss.x))) } \keyword{multivariate} MASS/man/Traffic.Rd0000644000176000001440000000237111754562034013477 0ustar ripleyusers% file MASS/man/Traffic.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{Traffic} \alias{Traffic} \title{ Effect of Swedish Speed Limits on Accidents } \description{ An experiment was performed in Sweden in 1961--2 to assess the effect of a speed limit on the motorway accident rate. The experiment was conducted on 92 days in each year, matched so that day \code{j} in 1962 was comparable to day \code{j} in 1961. On some days the speed limit was in effect and enforced, while on other days there was no speed limit and cars tended to be driven faster. The speed limit days tended to be in contiguous blocks. } \usage{ Traffic } \format{ This data frame contains the following columns: \describe{ \item{\code{year}}{ 1961 or 1962. } \item{\code{day}}{ of year. } \item{\code{limit}}{ was there a speed limit? } \item{\code{y}}{ traffic accident count for that day. } } } \source{ Svensson, A. (1981) On the goodness-of-fit test for the multiplicative Poisson model. \emph{Annals of Statistics,} \bold{9}, 697--704. } \references{ Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. 
} \keyword{datasets} MASS/man/qda.Rd0000644000176000001440000000733011754562034012666 0ustar ripleyusers% file MASS/man/qda.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{qda} \alias{qda} \alias{qda.data.frame} \alias{qda.default} \alias{qda.formula} \alias{qda.matrix} \alias{model.frame.qda} \alias{print.qda} \title{ Quadratic Discriminant Analysis } \description{ Quadratic discriminant analysis. } \usage{ qda(x, \dots) \method{qda}{formula}(formula, data, \dots, subset, na.action) \method{qda}{default}(x, grouping, prior = proportions, method, CV = FALSE, nu, \dots) \method{qda}{data.frame}(x, \dots) \method{qda}{matrix}(x, grouping, \dots, subset, na.action) } \arguments{ \item{formula}{ A formula of the form \code{groups ~ x1 + x2 + \dots} That is, the response is the grouping factor and the right hand side specifies the (non-factor) discriminators. } \item{data}{ Data frame from which variables specified in \code{formula} are preferentially to be taken. } \item{x}{ (required if no formula is given as the principal argument.) a matrix or data frame or Matrix containing the explanatory variables. } \item{grouping}{ (required if no formula principal argument is given.) a factor specifying the class for each observation. } \item{prior}{ the prior probabilities of class membership. If unspecified, the class proportions for the training set are used. If specified, the probabilities should be specified in the order of the factor levels. } \item{subset}{ An index vector specifying the cases to be used in the training sample. (NOTE: If given, this argument must be named.) } \item{na.action}{ A function to specify the action to be taken if \code{NA}s are found. The default action is for the procedure to fail. An alternative is na.omit, which leads to rejection of cases with missing values on any required variable. (NOTE: If given, this argument must be named.) 
} \item{method}{ \code{"moment"} for standard estimators of the mean and variance, \code{"mle"} for MLEs, \code{"mve"} to use \code{cov.mve}, or \code{"t"} for robust estimates based on a t distribution. } \item{CV}{ If true, returns results (classes and posterior probabilities) for leave-one-out cross-validation. Note that if the prior is estimated, the proportions in the whole dataset are used. } \item{nu}{ degrees of freedom for \code{method = "t"}. } \item{\dots}{ arguments passed to or from other methods. }} \value{ an object of class \code{"qda"} containing the following components: \item{prior}{ the prior probabilities used. } \item{means}{ the group means. } \item{scaling}{ for each group \code{i}, \code{scaling[,,i]} is an array which transforms observations so that within-groups covariance matrix is spherical. } \item{ldet}{ a vector of half log determinants of the dispersion matrix. } \item{lev}{ the levels of the grouping factor. } \item{terms}{ (if formula is a formula) an object of mode expression and class term summarizing the formula. } \item{call}{ the (matched) function call. } unless \code{CV=TRUE}, when the return value is a list with components: \item{class}{ The MAP classification (a factor) } \item{posterior}{ posterior probabilities for the classes }} \details{ Uses a QR decomposition which will give an error message if the within-group variance is singular for any group. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. Ripley, B. D. (1996) \emph{Pattern Recognition and Neural Networks}. Cambridge University Press. 
} \seealso{ \code{\link{predict.qda}}, \code{\link{lda}} } \examples{ tr <- sample(1:50, 25) train <- rbind(iris3[tr,,1], iris3[tr,,2], iris3[tr,,3]) test <- rbind(iris3[-tr,,1], iris3[-tr,,2], iris3[-tr,,3]) cl <- factor(c(rep("s",25), rep("c",25), rep("v",25))) z <- qda(train, cl) predict(z,test)$class } \keyword{multivariate} MASS/man/cpus.Rd0000644000176000001440000000240411754562034013070 0ustar ripleyusers% file MASS/man/cpus.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{cpus} \alias{cpus} \title{ Performance of Computer CPUs } \description{ A relative performance measure and characteristics of 209 CPUs. } \usage{ cpus } \format{ The components are: \describe{ \item{\code{name}}{ manufacturer and model. } \item{\code{syct}}{ cycle time in nanoseconds. } \item{\code{mmin}}{ minimum main memory in kilobytes. } \item{\code{mmax}}{ maximum main memory in kilobytes. } \item{\code{cach}}{ cache size in kilobytes. } \item{\code{chmin}}{ minimum number of channels. } \item{\code{chmax}}{ maximum number of channels. } \item{\code{perf}}{ published performance on a benchmark mix relative to an IBM 370/158-3. } \item{\code{estperf}}{ estimated performance (by Ein-Dor & Feldmesser). } } } \source{ P. Ein-Dor and J. Feldmesser (1987) Attributes of the performance of central processing units: a relative performance prediction model. \emph{Comm. ACM.} \bold{30}, 308--317. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/newcomb.Rd0000644000176000001440000000147411754562034013556 0ustar ripleyusers% file MASS/man/newcomb.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{newcomb} \alias{newcomb} \title{ Newcomb's Measurements of the Passage Time of Light } \description{ A numeric vector giving the \sQuote{Third Series} of measurements of the passage time of light recorded by Newcomb in 1882. 
The given values divided by 1000 plus 24 give the time in millionths of a second for light to traverse a known distance. The \sQuote{true} value is now considered to be 33.02. } \usage{ newcomb } \source{ S. M. Stigler (1973) Simon Newcomb, Percy Daniell, and the history of robust estimation 1885--1920. \emph{Journal of the American Statistical Association} \bold{68}, 872--879. R. G. Staudte and S. J. Sheather (1990) \emph{Robust Estimation and Testing.} Wiley. } \keyword{datasets} MASS/man/beav2.Rd0000644000176000001440000000457513577076412013135 0ustar ripleyusers% file MASS/man/beav2.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{beav2} \alias{beav2} \title{ Body Temperature Series of Beaver 2 } \description{ Reynolds (1994) describes a small part of a study of the long-term temperature dynamics of beaver \emph{Castor canadensis} in north-central Wisconsin. Body temperature was measured by telemetry every 10 minutes for four females, but data from a one period of less than a day for each of two animals is used there. } \usage{ beav2 } \format{ The \code{beav2} data frame has 100 rows and 4 columns. This data frame contains the following columns: \describe{ \item{\code{day}}{ Day of observation (in days since the beginning of 1990), November 3--4. } \item{\code{time}}{ Time of observation, in the form \code{0330} for 3.30am. } \item{\code{temp}}{ Measured body temperature in degrees Celsius. } \item{\code{activ}}{ Indicator of activity outside the retreat. } } } \source{ P. S. Reynolds (1994) Time-series analyses of beaver body temperatures. Chapter 11 of Lange, N., Ryan, L., Billard, L., Brillinger, D., Conquest, L. and Greenhouse, J. eds (1994) \emph{Case Studies in Biometry.} New York: John Wiley and Sons. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. 
} \seealso{ \code{\link{beav1}} } \examples{ attach(beav2) beav2$hours <- 24*(day-307) + trunc(time/100) + (time\%\%100)/60 plot(beav2$hours, beav2$temp, type = "l", xlab = "time", ylab = "temperature", main = "Beaver 2") usr <- par("usr"); usr[3:4] <- c(-0.2, 8); par(usr = usr) lines(beav2$hours, beav2$activ, type = "s", lty = 2) temp <- ts(temp, start = 8+2/3, frequency = 6) activ <- ts(activ, start = 8+2/3, frequency = 6) acf(temp[activ == 0]); acf(temp[activ == 1]) # also look at PACFs ar(temp[activ == 0]); ar(temp[activ == 1]) arima(temp, order = c(1,0,0), xreg = activ) dreg <- cbind(sin = sin(2*pi*beav2$hours/24), cos = cos(2*pi*beav2$hours/24)) arima(temp, order = c(1,0,0), xreg = cbind(active=activ, dreg)) ## IGNORE_RDIFF_BEGIN library(nlme) # for gls and corAR1 beav2.gls <- gls(temp ~ activ, data = beav2, corr = corAR1(0.8), method = "ML") summary(beav2.gls) summary(update(beav2.gls, subset = 6:100)) detach("beav2"); rm(temp, activ) ## IGNORE_RDIFF_END } \keyword{datasets} MASS/man/steam.Rd0000644000176000001440000000132611754562034013231 0ustar ripleyusers% file MASS/man/steam.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{steam} \alias{steam} \title{ The Saturated Steam Pressure Data } \description{ Temperature and pressure in a saturated steam driven experimental device. } \usage{ steam } \format{ The data frame contains the following components: \describe{ \item{\code{Temp}}{ temperature, in degrees Celsius. } \item{\code{Press}}{ pressure, in Pascals. } } } \source{ N.R. Draper and H. Smith (1981) \emph{Applied Regression Analysis.} Wiley, pp. 518--9. } \references{ Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. } \keyword{datasets} MASS/man/Cushings.Rd0000644000176000001440000000221511754562034013701 0ustar ripleyusers% file MASS/man/Cushings.Rd % copyright (C) 1994-9 W. N. Venables and B. D. 
Ripley % \name{Cushings} \alias{Cushings} \title{ Diagnostic Tests on Patients with Cushing's Syndrome } \description{ Cushing's syndrome is a hypertensive disorder associated with over-secretion of cortisol by the adrenal gland. The observations are urinary excretion rates of two steroid metabolites. } \usage{ Cushings } \format{ The \code{Cushings} data frame has 27 rows and 3 columns: \describe{ \item{\code{Tetrahydrocortisone}}{ urinary excretion rate (mg/24hr) of Tetrahydrocortisone. } \item{\code{Pregnanetriol}}{ urinary excretion rate (mg/24hr) of Pregnanetriol. } \item{\code{Type}}{ underlying type of syndrome, coded \code{a} (adenoma) , \code{b} (bilateral hyperplasia), \code{c} (carcinoma) or \code{u} for unknown. } } } \source{ J. Aitchison and I. R. Dunsmore (1975) \emph{Statistical Prediction Analysis.} Cambridge University Press, Tables 11.1--3. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/Sitka89.Rd0000644000176000001440000000205311754562034013352 0ustar ripleyusers% file MASS/man/Sitka89.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{Sitka89} \alias{Sitka89} \title{ Growth Curves for Sitka Spruce Trees in 1989 } \description{ The \code{Sitka89} data frame has 632 rows and 4 columns. It gives repeated measurements on the log-size of 79 Sitka spruce trees, 54 of which were grown in ozone-enriched chambers and 25 were controls. The size was measured eight times in 1989, at roughly monthly intervals. } \usage{ Sitka89 } \format{ This data frame contains the following columns: \describe{ \item{\code{size}}{measured size (height times diameter squared) of tree, on log scale.} \item{\code{Time}}{time of measurement in days since 1 January 1988.} \item{\code{tree}}{number of tree.} \item{\code{treat}}{either \code{"ozone"} for an ozone-enriched chamber or \code{"control"}.} } } \seealso{ \code{\link{Sitka}} } \source{ P. J. 
Diggle, K.-Y. Liang and S. L. Zeger (1994) \emph{Analysis of Longitudinal Data.} Clarendon Press, Oxford } \keyword{datasets} MASS/man/npk.Rd0000644000176000001440000000311513577077533012720 0ustar ripleyusers% file MASS/man/npk.Rd % copyright (C) 1999 W. N. Venables and B. D. Ripley % \name{npk} \alias{npk} \title{ Classical N, P, K Factorial Experiment } \description{ A classical N, P, K (nitrogen, phosphate, potassium) factorial experiment on the growth of peas conducted on 6 blocks. Each half of a fractional factorial design confounding the NPK interaction was used on 3 of the plots. } \usage{ npk } \format{ The \code{npk} data frame has 24 rows and 5 columns: \describe{ \item{\code{block}}{ which block (label 1 to 6). } \item{\code{N}}{ indicator (0/1) for the application of nitrogen. } \item{\code{P}}{ indicator (0/1) for the application of phosphate. } \item{\code{K}}{ indicator (0/1) for the application of potassium. } \item{\code{yield}}{ Yield of peas, in pounds/plot (the plots were (1/70) acre). } } } \note{ This dataset is also contained in \R 3.0.2 and later. } \source{ Imperial College, London, M.Sc. exercise sheet. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \examples{ options(contrasts = c("contr.sum", "contr.poly")) npk.aov <- aov(yield ~ block + N*P*K, npk) npk.aov summary(npk.aov) alias(npk.aov) coef(npk.aov) options(contrasts = c("contr.treatment", "contr.poly")) npk.aov1 <- aov(yield ~ block + N + K, data = npk) summary.lm(npk.aov1) se.contrast(npk.aov1, list(N=="0", N=="1"), data = npk) ## IGNORE_RDIFF_BEGIN model.tables(npk.aov1, type = "means", se = TRUE) ## IGNORE_RDIFF_END} \keyword{datasets} MASS/man/summary.rlm.Rd0000644000176000001440000000437512463405616014415 0ustar ripleyusers% file MASS/man/summary.rlm.Rd % copyright (C) 1994-2014 W. N. Venables and B. D. 
Ripley % \name{summary.rlm} \alias{summary.rlm} \alias{print.summary.rlm} \title{ Summary Method for Robust Linear Models } \description{ \code{summary} method for objects of class \code{"rlm"} } \usage{ \method{summary}{rlm}(object, method = c("XtX", "XtWX"), correlation = FALSE, \dots) } \arguments{ \item{object}{ the fitted model. This is assumed to be the result of some fit that produces an object inheriting from the class \code{rlm}, in the sense that the components returned by the \code{rlm} function will be available. } \item{method}{ Should the weighted (by the IWLS weights) or unweighted cross-products matrix be used? } \item{correlation}{ logical. Should correlations be computed (and printed)? } \item{\dots}{ arguments passed to or from other methods. }} \value{ If printing takes place, only a null value is returned. Otherwise, a list is returned with the following components. Printing always takes place if this function is invoked automatically as a method for the \code{summary} function. \item{correlation}{ The computed correlation coefficient matrix for the coefficients in the model. } \item{cov.unscaled}{ The unscaled covariance matrix; i.e, a matrix such that multiplying it by an estimate of the error variance produces an estimated covariance matrix for the coefficients. } \item{sigma}{ The scale estimate. } \item{stddev}{ A scale estimate used for the standard errors. } \item{df}{ The number of degrees of freedom for the model and for residuals. } \item{coefficients}{ A matrix with three columns, containing the coefficients, their standard errors and the corresponding t statistic. } \item{terms}{ The terms object used in fitting this model. }} \details{ This function is a method for the generic function \code{summary()} for class \code{"rlm"}. It can be invoked by calling \code{summary(x)} for an object \code{x} of the appropriate class, or directly by calling \code{summary.rlm(x)} regardless of the class of the object. } \references{ Venables, W. 
N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{summary}} } \examples{ summary(rlm(calls ~ year, data = phones, maxit = 50)) } \keyword{robust} MASS/man/ginv.Rd0000644000176000001440000000132313361334240013050 0ustar ripleyusers% file MASS/man/ginv.Rd % copyright (C) 1994-2018 W. N. Venables and B. D. Ripley % \name{ginv} \alias{ginv} \title{ Generalized Inverse of a Matrix } \description{ Calculates the Moore-Penrose generalized inverse of a matrix \code{X}. } \usage{ ginv(X, tol = sqrt(.Machine$double.eps)) } \arguments{ \item{X}{ Matrix for which the Moore-Penrose inverse is required. } \item{tol}{ A relative tolerance to detect zero singular values. }} \value{ A MP generalized inverse matrix for \code{X}. } \references{ Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. p.100. } \seealso{ \code{\link{solve}}, \code{\link{svd}}, \code{\link{eigen}} } \keyword{algebra} MASS/man/theta.md.Rd0000644000176000001440000000454312171733033013621 0ustar ripleyusers% file MASS/man/theta.md.Rd % copyright (C) 1994-2005 W. N. Venables and B. D. Ripley % \name{theta.md} \alias{theta.md} \alias{theta.ml} \alias{theta.mm} \title{ Estimate theta of the Negative Binomial } \description{ Given the estimated mean vector, estimate \code{theta} of the Negative Binomial Distribution. } \usage{ theta.md(y, mu, dfr, weights, limit = 20, eps = .Machine$double.eps^0.25) theta.ml(y, mu, n, weights, limit = 10, eps = .Machine$double.eps^0.25, trace = FALSE) theta.mm(y, mu, dfr, weights, limit = 10, eps = .Machine$double.eps^0.25) } \arguments{ \item{y}{ Vector of observed values from the Negative Binomial. } \item{mu}{ Estimated mean vector. } \item{n}{ Number of data points (defaults to the sum of \code{weights}) } \item{dfr}{ Residual degrees of freedom (assuming \code{theta} known). 
For a weighted fit this is the sum of the weights minus the number of fitted parameters. } \item{weights}{ Case weights. If missing, taken as 1. } \item{limit}{ Limit on the number of iterations. } \item{eps}{ Tolerance to determine convergence. } \item{trace}{ logical: should iteration progress be printed? } } \details{ \code{theta.md} estimates by equating the deviance to the residual degrees of freedom, an analogue of a moment estimator. \code{theta.ml} uses maximum likelihood. \code{theta.mm} calculates the moment estimator of \code{theta} by equating the Pearson chi-square \eqn{\sum (y-\mu)^2/(\mu+\mu^2/\theta)}{sum((y-mu)^2/(mu+mu^2/theta))} to the residual degrees of freedom. } \value{ The required estimate of \code{theta}, as a scalar. For \code{theta.ml}, the standard error is given as attribute \code{"SE"}. } \seealso{ \code{\link{glm.nb}} } \examples{ quine.nb <- glm.nb(Days ~ .^2, data = quine) theta.md(quine$Days, fitted(quine.nb), dfr = df.residual(quine.nb)) theta.ml(quine$Days, fitted(quine.nb)) theta.mm(quine$Days, fitted(quine.nb), dfr = df.residual(quine.nb)) ## weighted example yeast <- data.frame(cbind(numbers = 0:5, fr = c(213, 128, 37, 18, 3, 1))) fit <- glm.nb(numbers ~ 1, weights = fr, data = yeast) summary(fit) mu <- fitted(fit) theta.md(yeast$numbers, mu, dfr = 399, weights = yeast$fr) theta.ml(yeast$numbers, mu, limit = 15, weights = yeast$fr) theta.mm(yeast$numbers, mu, dfr = 399, weights = yeast$fr) } \keyword{models} MASS/man/ucv.Rd0000644000176000001440000000155311754562034012717 0ustar ripleyusers% file MASS/man/ucv.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{ucv} \alias{ucv} \title{ Unbiased Cross-Validation for Bandwidth Selection } \description{ Uses unbiased cross-validation to select the bandwidth of a Gaussian kernel density estimator. } \usage{ ucv(x, nb = 1000, lower, upper) } \arguments{ \item{x}{ a numeric vector } \item{nb}{ number of bins to use. } \item{lower, upper}{ Range over which to minimize. 
The default is almost always satisfactory. }} \value{ a bandwidth. } \references{ Scott, D. W. (1992) \emph{Multivariate Density Estimation: Theory, Practice, and Visualization.} Wiley. Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{bcv}}, \code{\link{width.SJ}}, \code{\link{density}} } \examples{ ucv(geyser$duration) } \keyword{dplot} MASS/man/glmmPQL.Rd0000644000176000001440000000443713243747121013434 0ustar ripleyusers\name{glmmPQL} \alias{glmmPQL} \title{ Fit Generalized Linear Mixed Models via PQL } \description{ Fit a GLMM model with multivariate normal random effects, using Penalized Quasi-Likelihood. } \usage{ glmmPQL(fixed, random, family, data, correlation, weights, control, niter = 10, verbose = TRUE, \dots) } \arguments{ \item{fixed}{ a two-sided linear formula giving fixed-effects part of the model. } \item{random}{ a formula or list of formulae describing the random effects. } \item{family}{ a GLM family. } \item{data}{ an optional data frame used as the first place to find variables in the formulae, \code{weights} and if present in \code{\dots}, \code{subset}. } \item{correlation}{ an optional correlation structure. } \item{weights}{ optional case weights as in \code{glm}. } \item{control}{ an optional argument to be passed to \code{lme}. } \item{niter}{ maximum number of iterations. } \item{verbose}{ logical: print out record of iterations? } \item{\dots}{ Further arguments for \code{lme}. }} \value{ A object of class \code{"lme"}: see \code{\link[nlme]{lmeObject}}. } \details{ \code{glmmPQL} works by repeated calls to \code{\link[nlme]{lme}}, so package \code{nlme} will be loaded at first use if necessary. } \references{ Schall, R. (1991) Estimation in generalized linear models with random effects. \emph{Biometrika} \bold{78}, 719--727. Breslow, N. E. and Clayton, D. G. (1993) Approximate inference in generalized linear mixed models. 
\emph{Journal of the American Statistical Association} \bold{88}, 9--25. Wolfinger, R. and O'Connell, M. (1993) Generalized linear mixed models: a pseudo-likelihood approach. \emph{Journal of Statistical Computation and Simulation} \bold{48}, 233--243. Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link[nlme]{lme}} } \examples{ library(nlme) # will be loaded automatically if omitted summary(glmmPQL(y ~ trt + I(week > 2), random = ~ 1 | ID, family = binomial, data = bacteria)) \dontshow{ # an example of offset summary(glmmPQL(y ~ trt + week, random = ~ 1 | ID, family = binomial, data = bacteria)) summary(glmmPQL(y ~ trt + week + offset(week), random = ~ 1 | ID, family = binomial, data = bacteria)) }} \keyword{models} MASS/man/immer.Rd0000644000176000001440000000255511754562034013236 0ustar ripleyusers% file MASS/man/immer.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{immer} \alias{immer} \title{ Yields from a Barley Field Trial } \description{ The \code{immer} data frame has 30 rows and 4 columns. Five varieties of barley were grown in six locations in each of 1931 and 1932. } \usage{ immer } \format{ This data frame contains the following columns: \describe{ \item{\code{Loc}}{ The location. } \item{\code{Var}}{ The variety of barley (\code{"manchuria"}, \code{"svansota"}, \code{"velvet"}, \code{"trebi"} and \code{"peatland"}). } \item{\code{Y1}}{ Yield in 1931. } \item{\code{Y2}}{ Yield in 1932. } } } \source{ Immer, F.R., Hayes, H.D. and LeRoy Powers (1934) Statistical determination of barley varietal adaptation. \emph{Journal of the American Society for Agronomy} \bold{26}, 403--419. Fisher, R.A. (1947) \emph{The Design of Experiments.} 4th edition. Edinburgh: Oliver and Boyd. } \references{ Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. 
} \examples{ immer.aov <- aov(cbind(Y1,Y2) ~ Loc + Var, data = immer) summary(immer.aov) immer.aov <- aov((Y1+Y2)/2 ~ Var + Loc, data = immer) summary(immer.aov) model.tables(immer.aov, type = "means", se = TRUE, cterms = "Var") } \keyword{datasets} MASS/man/width.SJ.Rd0000644000176000001440000000271413054512471013547 0ustar ripleyusers% file MASS/man/width.SJ.Rd % copyright (C) 1994-2017 W. N. Venables and B. D. Ripley % \name{width.SJ} \alias{width.SJ} \title{ Bandwidth Selection by Pilot Estimation of Derivatives } \description{ Uses the method of Sheather & Jones (1991) to select the bandwidth of a Gaussian kernel density estimator. } \usage{ width.SJ(x, nb = 1000, lower, upper, method = c("ste", "dpi")) } \arguments{ \item{x}{ a numeric vector } \item{nb}{ number of bins to use. } \item{upper, lower}{ range over which to search for solution if \code{method = "ste"}. } \item{method}{ Either \code{"ste"} ("solve-the-equation") or \code{"dpi"} ("direct plug-in"). }} \value{ a bandwidth. } \note{ A faster version for large \code{n} (thousands) is available in \R \eqn{\ge}{>=} 3.4.0 as part of \code{\link{bw.SJ}}: quadruple its value for comparability with this version. } \references{ Sheather, S. J. and Jones, M. C. (1991) A reliable data-based bandwidth selection method for kernel density estimation. \emph{Journal of the Royal Statistical Society series B} \bold{53}, 683--690. Scott, D. W. (1992) \emph{Multivariate Density Estimation: Theory, Practice, and Visualization.} Wiley. Wand, M. P. and Jones, M. C. (1995) \emph{Kernel Smoothing.} Chapman & Hall. } \seealso{ \code{\link{ucv}}, \code{\link{bcv}}, \code{\link{density}} } \examples{ width.SJ(geyser$duration, method = "dpi") width.SJ(geyser$duration) width.SJ(galaxies, method = "dpi") width.SJ(galaxies) } \keyword{dplot} MASS/man/michelson.Rd0000644000176000001440000000214711754562034014103 0ustar ripleyusers% file MASS/man/michelson.Rd % copyright (C) 1994-9 W. N. Venables and B. D. 
Ripley % \name{michelson} \alias{michelson} \title{ Michelson's Speed of Light Data } \description{ Measurements of the speed of light in air, made between 5th June and 2nd July, 1879. The data consists of five experiments, each consisting of 20 consecutive runs. The response is the speed of light in km/s, less 299000. The currently accepted value, on this scale of measurement, is 734.5. } \usage{ michelson } \format{ The data frame contains the following components: \describe{ \item{\code{Expt}}{ The experiment number, from 1 to 5. } \item{\code{Run}}{ The run number within each experiment. } \item{\code{Speed}}{ Speed-of-light measurement. } } } \source{ A.J. Weekes (1986) \emph{A Genstat Primer.} Edward Arnold. S. M. Stigler (1977) Do robust estimators work with real data? \emph{Annals of Statistics} \bold{5}, 1055--1098. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/abbey.Rd0000644000176000001440000000076711754562034013212 0ustar ripleyusers% file MASS/man/abbey.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{abbey} \alias{abbey} \title{ Determinations of Nickel Content } \description{ A numeric vector of 31 determinations of nickel content (ppm) in a Canadian syenite rock. } \usage{ abbey } \source{ S. Abbey (1988) \emph{Geostandards Newsletter} \bold{12}, 241. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/parcoord.Rd0000644000176000001440000000234611754562034013734 0ustar ripleyusers\name{parcoord} \alias{parcoord} \title{ Parallel Coordinates Plot } \description{ Parallel coordinates plot } \usage{ parcoord(x, col = 1, lty = 1, var.label = FALSE, \dots) } \arguments{ \item{x}{ a matrix or data frame who columns represent variables. Missing values are allowed. 
} \item{col}{ A vector of colours, recycled as necessary for each observation. } \item{lty}{ A vector of line types, recycled as necessary for each observation. } \item{var.label}{ If \code{TRUE}, each variable's axis is labelled with maximum and minimum values. } \item{\dots}{ Further graphics parameters which are passed to \code{matplot}. }} \section{Side Effects}{ a parallel coordinates plots is drawn. } \references{ Wegman, E. J. (1990) Hyperdimensional data analysis using parallel coordinates. \emph{Journal of the American Statistical Association} \bold{85}, 664--675. Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \author{ B. D. Ripley. Enhancements based on ideas and code by Fabian Scheipl. } \examples{ parcoord(state.x77[, c(7, 4, 6, 2, 5, 3)]) ir <- rbind(iris3[,,1], iris3[,,2], iris3[,,3]) parcoord(log(ir)[, c(3, 4, 2, 1)], col = 1 + (0:149)\%/\%50) } \keyword{hplot} MASS/man/Cars93.Rd0000644000176000001440000000661313361334051013160 0ustar ripleyusers% file MASS/man/Cars93.Rd % copyright (C) 1994-2018 W. N. Venables and B. D. Ripley % \name{Cars93} \alias{Cars93} \title{ Data from 93 Cars on Sale in the USA in 1993 } \description{ The \code{Cars93} data frame has 93 rows and 27 columns. } \usage{ Cars93 } \format{ This data frame contains the following columns: \describe{ \item{\code{Manufacturer}}{ Manufacturer. } \item{\code{Model}}{ Model. } \item{\code{Type}}{ Type: a factor with levels \code{"Small"}, \code{"Sporty"}, \code{"Compact"}, \code{"Midsize"}, \code{"Large"} and \code{"Van"}. } \item{\code{Min.Price}}{ Minimum Price (in \$1,000): price for a basic version. } \item{\code{Price}}{ Midrange Price (in \$1,000): average of \code{Min.Price} and \code{Max.Price}. } \item{\code{Max.Price}}{ Maximum Price (in \$1,000): price for \dQuote{a premium version}. } \item{\code{MPG.city}}{ City MPG (miles per US gallon by EPA rating). } \item{\code{MPG.highway}}{ Highway MPG. 
} \item{\code{AirBags}}{ Air Bags standard. Factor: none, driver only, or driver & passenger. } \item{\code{DriveTrain}}{ Drive train type: rear wheel, front wheel or 4WD; (factor). } \item{\code{Cylinders}}{ Number of cylinders (missing for Mazda RX-7, which has a rotary engine). } \item{\code{EngineSize}}{ Engine size (litres). } \item{\code{Horsepower}}{ Horsepower (maximum). } \item{\code{RPM}}{ RPM (revs per minute at maximum horsepower). } \item{\code{Rev.per.mile}}{ Engine revolutions per mile (in highest gear). } \item{\code{Man.trans.avail}}{ Is a manual transmission version available? (yes or no, Factor). } \item{\code{Fuel.tank.capacity}}{ Fuel tank capacity (US gallons). } \item{\code{Passengers}}{ Passenger capacity (persons) } \item{\code{Length}}{ Length (inches). } \item{\code{Wheelbase}}{ Wheelbase (inches). } \item{\code{Width}}{ Width (inches). } \item{\code{Turn.circle}}{ U-turn space (feet). } \item{\code{Rear.seat.room}}{ Rear seat room (inches) (missing for 2-seater vehicles). } \item{\code{Luggage.room}}{ Luggage capacity (cubic feet) (missing for vans). } \item{\code{Weight}}{ Weight (pounds). } \item{\code{Origin}}{ Of non-USA or USA company origins? (factor). } \item{\code{Make}}{ Combination of Manufacturer and Model (character). } } } \details{ Cars were selected at random from among 1993 passenger car models that were listed in both the \emph{Consumer Reports} issue and the \emph{PACE Buying Guide}. Pickup trucks and Sport/Utility vehicles were eliminated due to incomplete information in the \emph{Consumer Reports} source. Duplicate models (e.g., Dodge Shadow and Plymouth Sundance) were listed at most once. Further description can be found in Lock (1993). } \source{ Lock, R. H. (1993) 1993 New Car Data. \emph{Journal of Statistics Education} \bold{1}(1). \url{ https://doi.org/10.1080/10691898.1993.11910459} % \url{http://www.amstat.org/publications/jse/v1n1/datasets.lock.html}. } \references{ Venables, W. N. and Ripley, B. D. 
(1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. } \keyword{datasets} MASS/man/Aids2.Rd0000644000176000001440000000237311754562034013065 0ustar ripleyusers% file MASS/man/Aids2.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{Aids2} \alias{Aids2} \title{ Australian AIDS Survival Data } \description{ Data on patients diagnosed with AIDS in Australia before 1 July 1991. } \usage{ Aids2 } \format{ This data frame contains 2843 rows and the following columns: \describe{ \item{\code{state}}{ Grouped state of origin: \code{"NSW "}includes ACT and \code{"other"} is WA, SA, NT and TAS. } \item{\code{sex}}{ Sex of patient. } \item{\code{diag}}{(Julian) date of diagnosis.} \item{\code{death}}{ (Julian) date of death or end of observation. } \item{\code{status}}{ \code{"A"} (alive) or \code{"D"} (dead) at end of observation. } \item{\code{T.categ}}{ Reported transmission category. } \item{\code{age}}{ Age (years) at diagnosis. } } } \note{ This data set has been slightly jittered as a condition of its release, to ensure patient confidentiality. } \source{ Dr P. J. Solomon and the Australian National Centre in HIV Epidemiology and Clinical Research. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/gehan.Rd0000644000176000001440000000303611754562034013202 0ustar ripleyusers% file MASS/man/gehan.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{gehan} \alias{gehan} \title{ Remission Times of Leukaemia Patients } \description{ A data frame from a trial of 42 leukaemia patients. Some were treated with the drug \emph{6-mercaptopurine} and the rest are controls. The trial was designed as matched pairs, both withdrawn from the trial when either came out of remission. } \usage{ gehan } \format{ This data frame contains the following columns: \describe{ \item{\code{pair}}{ label for pair. 
} \item{\code{time}}{ remission time in weeks. } \item{\code{cens}}{ censoring, 0/1. } \item{\code{treat}}{ treatment, control or 6-MP. } } } \source{ Cox, D. R. and Oakes, D. (1984) \emph{Analysis of Survival Data.} Chapman & Hall, p. 7. Taken from Gehan, E.A. (1965) A generalized Wilcoxon test for comparing arbitrarily single-censored samples. \emph{Biometrika} \bold{52}, 203--233. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \examples{ library(survival) gehan.surv <- survfit(Surv(time, cens) ~ treat, data = gehan, conf.type = "log-log") summary(gehan.surv) survreg(Surv(time, cens) ~ factor(pair) + treat, gehan, dist = "exponential") summary(survreg(Surv(time, cens) ~ treat, gehan, dist = "exponential")) summary(survreg(Surv(time, cens) ~ treat, gehan)) gehan.cox <- coxph(Surv(time, cens) ~ treat, gehan) summary(gehan.cox) } \keyword{datasets} MASS/man/DDT.Rd0000644000176000001440000000121711754562034012532 0ustar ripleyusers% file MASS/man/DDT.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{DDT} \alias{DDT} \title{ DDT in Kale } \description{ A numeric vector of 15 measurements by different laboratories of the pesticide DDT in kale, in ppm (parts per million) using the multiple pesticide residue measurement. } \usage{ DDT } \source{ C. E. Finsterwalder (1976) Collaborative study of an extension of the Mills \emph{et al} method for the determination of pesticide residues in food. \emph{J. Off. Anal. Chem.} \bold{59}, 169--171 R. G. Staudte and S. J. Sheather (1990) \emph{Robust Estimation and Testing.} Wiley } \keyword{datasets} MASS/man/geyser.Rd0000644000176000001440000000245312463405616013420 0ustar ripleyusers% file MASS/man/geyser.Rd % copyright (C) 1994-2014 W. N. Venables and B. D. 
Ripley % \name{geyser} \alias{geyser} \usage{ geyser } \title{Old Faithful Geyser Data} \description{ A version of the eruptions data from the \sQuote{Old Faithful} geyser in Yellowstone National Park, Wyoming. This version comes from Azzalini and Bowman (1990) and is of continuous measurement from August 1 to August 15, 1985. Some nocturnal duration measurements were coded as 2, 3 or 4 minutes, having originally been described as \sQuote{short}, \sQuote{medium} or \sQuote{long}. } \format{A data frame with 299 observations on 2 variables. \tabular{lll}{ \code{duration} \tab numeric \tab Eruption time in mins \cr \code{waiting} \tab numeric \tab Waiting time for this eruption \cr } } \note{ The \code{waiting} time was incorrectly described as the time to the next eruption in the original files, and corrected for \pkg{MASS} version 7.3-30. } \seealso{ \code{\link{faithful}}. CRAN package \pkg{sm}. } \references{ Azzalini, A. and Bowman, A. W. (1990) A look at some data on the Old Faithful geyser. \emph{Applied Statistics} \bold{39}, 357--365. Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/painters.Rd0000644000176000001440000000311411754562034013742 0ustar ripleyusers% file MASS/man/painters.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{painters} \alias{painters} \title{ The Painter's Data of de Piles } \description{ The subjective assessment, on a 0 to 20 integer scale, of 54 classical painters. The painters were assessed on four characteristics: composition, drawing, colour and expression. The data is due to the Eighteenth century art critic, de Piles. } \usage{ painters } \format{ The row names of the data frame are the painters. The components are: \describe{ \item{\code{Composition}}{ Composition score. } \item{\code{Drawing}}{ Drawing score. } \item{\code{Colour}}{ Colour score. } \item{\code{Expression}}{ Expression score. 
} \item{\code{School}}{ The school to which a painter belongs, as indicated by a factor level code as follows: \code{"A"}: Renaissance; \code{"B"}: Mannerist; \code{"C"}: Seicento; \code{"D"}: Venetian; \code{"E"}: Lombard; \code{"F"}: Sixteenth Century; \code{"G"}: Seventeenth Century; \code{"H"}: French. } } } \source{ A. J. Weekes (1986) \emph{A Genstat Primer.} Edward Arnold. M. Davenport and G. Studdert-Kennedy (1972) The statistical analysis of aesthetic judgement: an exploration. \emph{Applied Statistics} \bold{21}, 324--333. I. T. Jolliffe (1986) \emph{Principal Component Analysis.} Springer. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/negative.binomial.Rd0000644000176000001440000000253711754562034015520 0ustar ripleyusers% file MASS/man/negative.binomial.Rd % copyright (C) 1994-2006 W. N. Venables and B. D. Ripley % \name{negative.binomial} \alias{negative.binomial} \title{ Family function for Negative Binomial GLMs } \description{ Specifies the information required to fit a Negative Binomial generalized linear model, with known \code{theta} parameter, using \code{glm()}. } \usage{ negative.binomial(theta = stop("'theta' must be specified"), link = "log") } \arguments{ \item{theta}{ The known value of the additional parameter, \code{theta}. } \item{link}{ The link function, as a character string, name or one-element character vector specifying one of \code{log}, \code{sqrt} or \code{identity}, or an object of class \code{"\link[=family]{link-glm}"}. } } \value{ An object of class \code{"family"}, a list of functions and expressions needed by \code{glm()} to fit a Negative Binomial generalized linear model. } \seealso{ \code{\link{glm.nb}}, \code{\link{anova.negbin}}, \code{\link{summary.negbin}} } \references{ Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. 
} \examples{ # Fitting a Negative Binomial model to the quine data # with theta = 2 assumed known. # glm(Days ~ .^4, family = negative.binomial(2), data = quine) } \keyword{regression} \keyword{models} MASS/man/VA.Rd0000644000176000001440000000202511754562034012423 0ustar ripleyusers\name{VA} \alias{VA} \title{ Veteran's Administration Lung Cancer Trial } \description{ Veteran's Administration lung cancer trial from Kalbfleisch & Prentice. } \usage{ VA } \format{ A data frame with columns: \describe{ \item{\code{stime}}{ survival or follow-up time in days. } \item{\code{status}}{ dead or censored. } \item{\code{treat}}{ treatment: standard or test. } \item{\code{age}}{ patient's age in years. } \item{\code{Karn}}{ Karnofsky score of patient's performance on a scale of 0 to 100. } \item{\code{diag.time}}{ times since diagnosis in months at entry to trial. } \item{\code{cell}}{ one of four cell types. } \item{\code{prior}}{ prior therapy? } } } \source{ Kalbfleisch, J.D. and Prentice R.L. (1980) \emph{The Statistical Analysis of Failure Time Data.} Wiley. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/loglm1.Rd0000644000176000001440000000327411754562034013317 0ustar ripleyusers\name{loglm1} \alias{loglm1} \alias{loglm1.xtabs} \alias{loglm1.data.frame} \alias{loglm1.default} \title{ Fit Log-Linear Models by Iterative Proportional Scaling -- Internal function } \description{ \code{loglm1} is an internal function used by \code{\link{loglm}}. It is a generic function dispatching on the \code{data} argument. 
} \usage{ loglm1(formula, data, \dots) \method{loglm1}{xtabs}(formula, data, \dots) \method{loglm1}{data.frame}(formula, data, \dots) \method{loglm1}{default}(formula, data, start = rep(1, length(data)), fitted = FALSE, keep.frequencies = fitted, param = TRUE, eps = 1/10, iter = 40, print = FALSE, \dots) } \arguments{ \item{formula}{ A linear model formula specifying the log-linear model. See \code{\link{loglm}} for its interpretation.} \item{data}{ Numeric array or data frame. In the first case it specifies the array of frequencies; in then second it provides the data frame from which the variables occurring in the formula are preferentially obtained in the usual way. This argument may also be the result of a call to \code{\link{xtabs}}. } \item{start, param, eps, iter, print}{Arguments passed to \code{\link{loglin}}.} \item{fitted}{logical: should the fitted values be returned?} \item{keep.frequencies}{ If \code{TRUE} specifies that the (possibly constructed) array of frequencies is to be retained as part of the fitted model object. The default action is to use the same value as that used for \code{fitted}. } \item{\dots}{arguments passed to the default method.} } \value{ An object of class \code{"loglm"}. } \seealso{ \code{\link{loglm}}, \code{\link{loglin}} } \keyword{internal} MASS/man/OME.Rd0000644000176000001440000001525512463405616012546 0ustar ripleyusers% file MASS/man/OME.Rd % copyright (C) 1994-2014 W. N. Venables and B. D. Ripley % \name{OME} \alias{OME} \title{ Tests of Auditory Perception in Children with OME } \description{ Experiments were performed on children on their ability to differentiate a signal in broad-band noise. The noise was played from a pair of speakers and a signal was added to just one channel; the subject had to turn his/her head to the channel with the added signal. 
The signal was either coherent (the amplitude of the noise was increased for a period) or incoherent (independent noise was added for the same period to form the same increase in power). The threshold used in the original analysis was the stimulus loudness needs to get 75\% correct responses. Some of the children had suffered from otitis media with effusion (OME). } \usage{ OME } \format{ The \code{OME} data frame has 1129 rows and 7 columns: \describe{ \item{\code{ID}}{ Subject ID (1 to 99, with some IDs missing). A few subjects were measured at different ages. } \item{\code{OME}}{ \code{"low"} or \code{"high"} or \code{"N/A"} (at ages other than 30 and 60 months). } \item{\code{Age}}{ Age of the subject (months). } \item{\code{Loud}}{ Loudness of stimulus, in decibels. } \item{\code{Noise}}{ Whether the signal in the stimulus was \code{"coherent"} or \code{"incoherent"}. } \item{\code{Correct}}{ Number of correct responses from \code{Trials} trials. } \item{\code{Trials}}{ Number of trials performed. } } } \source{ Sarah Hogan, Dept of Physiology, University of Oxford, via Dept of Statistics Consulting Service } \section{Background}{ The experiment was to study otitis media with effusion (OME), a very common childhood condition where the middle ear space, which is normally air-filled, becomes congested by a fluid. There is a concomitant fluctuating, conductive hearing loss which can result in various language, cognitive and social deficits. The term \sQuote{binaural hearing} is used to describe the listening conditions in which the brain is processing information from both ears at the same time. The brain computes differences in the intensity and/or timing of signals arriving at each ear which contributes to sound localisation and also to our ability to hear in background noise. 
Some years ago, it was found that children of 7--8 years with a history of significant OME had significantly worse binaural hearing than children without such a history, despite having equivalent sensitivity. The question remained as to whether it was the timing, the duration, or the degree of severity of the otitis media episodes during critical periods, which affected later binaural hearing. In an attempt to begin to answer this question, 95 children were monitored for the presence of effusion every month since birth. On the basis of OME experience in their first two years, the test population was split into one group of high OME prevalence and one of low prevalence. } \examples{ # Fit logistic curve from p = 0.5 to p = 1.0 fp1 <- deriv(~ 0.5 + 0.5/(1 + exp(-(x-L75)/scal)), c("L75", "scal"), function(x,L75,scal)NULL) nls(Correct/Trials ~ fp1(Loud, L75, scal), data = OME, start = c(L75=45, scal=3)) nls(Correct/Trials ~ fp1(Loud, L75, scal), data = OME[OME$Noise == "coherent",], start=c(L75=45, scal=3)) nls(Correct/Trials ~ fp1(Loud, L75, scal), data = OME[OME$Noise == "incoherent",], start = c(L75=45, scal=3)) # individual fits for each experiment aa <- factor(OME$Age) ab <- 10*OME$ID + unclass(aa) ac <- unclass(factor(ab)) OME$UID <- as.vector(ac) OME$UIDn <- OME$UID + 0.1*(OME$Noise == "incoherent") rm(aa, ab, ac) OMEi <- OME library(nlme) fp2 <- deriv(~ 0.5 + 0.5/(1 + exp(-(x-L75)/2)), "L75", function(x,L75) NULL) dec <- getOption("OutDec") options(show.error.messages = FALSE, OutDec=".") OMEi.nls <- nlsList(Correct/Trials ~ fp2(Loud, L75) | UIDn, data = OMEi, start = list(L75=45), control = list(maxiter=100)) options(show.error.messages = TRUE, OutDec=dec) tmp <- sapply(OMEi.nls, function(X) {if(is.null(X)) NA else as.vector(coef(X))}) OMEif <- data.frame(UID = round(as.numeric((names(tmp)))), Noise = rep(c("coherent", "incoherent"), 110), L75 = as.vector(tmp), stringsAsFactors = TRUE) OMEif$Age <- OME$Age[match(OMEif$UID, OME$UID)] OMEif$OME <- 
OME$OME[match(OMEif$UID, OME$UID)] OMEif <- OMEif[OMEif$L75 > 30,] summary(lm(L75 ~ Noise/Age, data = OMEif, na.action = na.omit)) summary(lm(L75 ~ Noise/(Age + OME), data = OMEif, subset = (Age >= 30 & Age <= 60), na.action = na.omit), cor = FALSE) # Or fit by weighted least squares fpl75 <- deriv(~ sqrt(n)*(r/n - 0.5 - 0.5/(1 + exp(-(x-L75)/scal))), c("L75", "scal"), function(r,n,x,L75,scal) NULL) nls(0 ~ fpl75(Correct, Trials, Loud, L75, scal), data = OME[OME$Noise == "coherent",], start = c(L75=45, scal=3)) nls(0 ~ fpl75(Correct, Trials, Loud, L75, scal), data = OME[OME$Noise == "incoherent",], start = c(L75=45, scal=3)) # Test to see if the curves shift with age fpl75age <- deriv(~sqrt(n)*(r/n - 0.5 - 0.5/(1 + exp(-(x-L75-slope*age)/scal))), c("L75", "slope", "scal"), function(r,n,x,age,L75,slope,scal) NULL) OME.nls1 <- nls(0 ~ fpl75age(Correct, Trials, Loud, Age, L75, slope, scal), data = OME[OME$Noise == "coherent",], start = c(L75=45, slope=0, scal=2)) sqrt(diag(vcov(OME.nls1))) OME.nls2 <- nls(0 ~ fpl75age(Correct, Trials, Loud, Age, L75, slope, scal), data = OME[OME$Noise == "incoherent",], start = c(L75=45, slope=0, scal=2)) sqrt(diag(vcov(OME.nls2))) # Now allow random effects by using NLME OMEf <- OME[rep(1:nrow(OME), OME$Trials),] OMEf$Resp <- with(OME, rep(rep(c(1,0), length(Trials)), t(cbind(Correct, Trials-Correct)))) OMEf <- OMEf[, -match(c("Correct", "Trials"), names(OMEf))] \dontrun{## these fail in R on most platforms fp2 <- deriv(~ 0.5 + 0.5/(1 + exp(-(x-L75)/exp(lsc))), c("L75", "lsc"), function(x, L75, lsc) NULL) try(summary(nlme(Resp ~ fp2(Loud, L75, lsc), fixed = list(L75 ~ Age, lsc ~ 1), random = L75 + lsc ~ 1 | UID, data = OMEf[OMEf$Noise == "coherent",], method = "ML", start = list(fixed=c(L75=c(48.7, -0.03), lsc=0.24)), verbose = TRUE))) try(summary(nlme(Resp ~ fp2(Loud, L75, lsc), fixed = list(L75 ~ Age, lsc ~ 1), random = L75 + lsc ~ 1 | UID, data = OMEf[OMEf$Noise == "incoherent",], method = "ML", start = list(fixed=c(L75=c(41.5, 
-0.1), lsc=0)), verbose = TRUE))) }} \keyword{datasets} MASS/man/galaxies.Rd0000644000176000001440000000277611754562034013727 0ustar ripleyusers% file MASS/man/galaxies.Rd % copyright (C) 1994-2010 W. N. Venables and B. D. Ripley % \name{galaxies} \alias{galaxies} \title{ Velocities for 82 Galaxies } \description{ A numeric vector of velocities in km/sec of 82 galaxies from 6 well-separated conic sections of an \code{unfilled} survey of the Corona Borealis region. Multimodality in such surveys is evidence for voids and superclusters in the far universe. } \usage{ galaxies } \source{ Roeder, K. (1990) Density estimation with confidence sets exemplified by superclusters and voids in galaxies. \emph{Journal of the American Statistical Association} \bold{85}, 617--624. Postman, M., Huchra, J. P. and Geller, M. J. (1986) Probes of large-scale structures in the Corona Borealis region. \emph{Astronomical Journal} \bold{92}, 1238--1247. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \note{ There is an 83rd measurement of 5607 km/sec in the Postman \emph{et al.} paper which is omitted in Roeder (1990) and from the dataset here. There is also a typo: this dataset has 78th observation 26690 which should be 26960. } \examples{ gal <- galaxies/1000 c(width.SJ(gal, method = "dpi"), width.SJ(gal)) plot(x = c(0, 40), y = c(0, 0.3), type = "n", bty = "l", xlab = "velocity of galaxy (1000km/s)", ylab = "density") rug(gal) lines(density(gal, width = 3.25, n = 200), lty = 1) lines(density(gal, width = 2.56, n = 200), lty = 3) } \keyword{datasets} MASS/man/hist.scott.Rd0000644000176000001440000000151211754562034014217 0ustar ripleyusers\name{hist.scott} \alias{hist.scott} \alias{hist.FD} \title{ Plot a Histogram with Automatic Bin Width Selection } \description{ Plot a histogram with automatic bin width selection, using the Scott or Freedman--Diaconis formulae. 
} \usage{ hist.scott(x, prob = TRUE, xlab = deparse(substitute(x)), ...) hist.FD(x, prob = TRUE, xlab = deparse(substitute(x)), ...) } \arguments{ \item{x}{A data vector} \item{prob}{Should the plot have unit area, so be a density estimate?} \item{xlab, \dots}{Further arguments to \code{hist}.} } \value{ For the \code{nclass.*} functions, the suggested number of classes. } \section{Side Effects}{ Plot a histogram. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Springer. } \seealso{ \code{\link{hist}} } \keyword{hplot} \keyword{dplot} MASS/man/waders.Rd0000644000176000001440000000403211754562034013402 0ustar ripleyusers% file MASS/man/waders.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{waders} \alias{waders} \title{ Counts of Waders at 15 Sites in South Africa } \description{ The \code{waders} data frame has 15 rows and 19 columns. The entries are counts of waders in summer. } \usage{ waders } \format{ This data frame contains the following columns (species) \describe{ \item{\code{S1}}{ Oystercatcher } \item{\code{S2}}{ White-fronted Plover } \item{\code{S3}}{ Kitt Lutz's Plover } \item{\code{S4}}{ Three-banded Plover } \item{\code{S5}}{ Grey Plover } \item{\code{S6}}{ Ringed Plover } \item{\code{S7}}{ Bar-tailed Godwit } \item{\code{S8}}{ Whimbrel } \item{\code{S9}}{ Marsh Sandpiper } \item{\code{S10}}{ Greenshank } \item{\code{S11}}{ Common Sandpiper } \item{\code{S12}}{ Turnstone } \item{\code{S13}}{ Knot } \item{\code{S14}}{ Sanderling } \item{\code{S15}}{ Little Stint } \item{\code{S16}}{ Curlew Sandpiper } \item{\code{S17}}{ Ruff } \item{\code{S18}}{ Avocet } \item{\code{S19}}{ Black-winged Stilt } } The rows are the sites: A = Namibia North coast\cr B = Namibia North wetland\cr C = Namibia South coast\cr D = Namibia South wetland\cr E = Cape North coast\cr F = Cape North wetland\cr G = Cape West coast\cr H = Cape West wetland\cr I = Cape South coast\cr J= Cape South wetland\cr K = Cape 
East coast\cr L = Cape East wetland\cr M = Transkei coast\cr N = Natal coast\cr O = Natal wetland } \source{ J.C. Gower and D.J. Hand (1996) \emph{Biplots} Chapman & Hall Table 9.1. Quoted as from: R.W. Summers, L.G. Underhill, D.J. Pearson and D.A. Scott (1987) Wader migration systems in south and eastern Africa and western Asia. \emph{Wader Study Group Bulletin} \bold{49} Supplement, 15--34. } \examples{ plot(corresp(waders, nf=2)) } \keyword{datasets} MASS/man/Sitka.Rd0000644000176000001440000000224511754562034013174 0ustar ripleyusers% file MASS/man/Sitka.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{Sitka} \alias{Sitka} \title{ Growth Curves for Sitka Spruce Trees in 1988 } \description{ The \code{Sitka} data frame has 395 rows and 4 columns. It gives repeated measurements on the log-size of 79 Sitka spruce trees, 54 of which were grown in ozone-enriched chambers and 25 were controls. The size was measured five times in 1988, at roughly monthly intervals. } \usage{ Sitka } \format{ This data frame contains the following columns: \describe{ \item{\code{size}}{measured size (height times diameter squared) of tree, on log scale.} \item{\code{Time}}{time of measurement in days since 1 January 1988.} \item{\code{tree}}{number of tree.} \item{\code{treat}}{either \code{"ozone"} for an ozone-enriched chamber or \code{"control"}.} } } \seealso{ \code{\link{Sitka89}}. } \source{ P. J. Diggle, K.-Y. Liang and S. L. Zeger (1994) \emph{Analysis of Longitudinal Data.} Clarendon Press, Oxford } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/predict.qda.Rd0000644000176000001440000000523711754562034014323 0ustar ripleyusers% file MASS/man/predict.qda.Rd % copyright (C) 1994-9 W. N. Venables and B. D. 
Ripley % \name{predict.qda} \alias{predict.qda} \title{ Classify from Quadratic Discriminant Analysis } \description{ Classify multivariate observations in conjunction with \code{qda} } \usage{ \method{predict}{qda}(object, newdata, prior = object$prior, method = c("plug-in", "predictive", "debiased", "looCV"), \dots) } \arguments{ \item{object}{ object of class \code{"qda"} } \item{newdata}{ data frame of cases to be classified or, if \code{object} has a formula, a data frame with columns of the same names as the variables used. A vector will be interpreted as a row vector. If newdata is missing, an attempt will be made to retrieve the data used to fit the \code{qda} object. } \item{prior}{ The prior probabilities of the classes, by default the proportions in the training set or what was set in the call to \code{qda}. } \item{method}{ This determines how the parameter estimation is handled. With \code{"plug-in"} (the default) the usual unbiased parameter estimates are used and assumed to be correct. With \code{"debiased"} an unbiased estimator of the log posterior probabilities is used, and with \code{"predictive"} the parameter estimates are integrated out using a vague prior. With \code{"looCV"} the leave-one-out cross-validation fits to the original dataset are computed and returned. } \item{\dots}{ arguments based from or to other methods }} \value{ a list with components \item{class}{ The MAP classification (a factor) } \item{posterior}{ posterior probabilities for the classes }} \details{ This function is a method for the generic function \code{predict()} for class \code{"qda"}. It can be invoked by calling \code{predict(x)} for an object \code{x} of the appropriate class, or directly by calling \code{predict.qda(x)} regardless of the class of the object. Missing values in \code{newdata} are handled by returning \code{NA} if the quadratic discriminants cannot be evaluated. 
If \code{newdata} is omitted and the \code{na.action} of the fit omitted cases, these will be omitted on the prediction. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. Ripley, B. D. (1996) \emph{Pattern Recognition and Neural Networks}. Cambridge University Press. } \seealso{ \code{\link{qda}}, \code{\link{lda}}, \code{\link{predict.lda}} } \examples{ tr <- sample(1:50, 25) train <- rbind(iris3[tr,,1], iris3[tr,,2], iris3[tr,,3]) test <- rbind(iris3[-tr,,1], iris3[-tr,,2], iris3[-tr,,3]) cl <- factor(c(rep("s",25), rep("c",25), rep("v",25))) zq <- qda(train, cl) predict(zq, test)$class } \keyword{multivariate} MASS/man/minn38.Rd0000644000176000001440000000247411754562034013241 0ustar ripleyusers% file MASS/man/minn38.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{minn38} \alias{minn38} \title{ Minnesota High School Graduates of 1938 } \description{ The Minnesota high school graduates of 1938 were classified according to four factors, described below. The \code{minn38} data frame has 168 rows and 5 columns. } \usage{ minn38 } \format{ This data frame contains the following columns: \describe{ \item{\code{hs}}{ high school rank: \code{"L"}, \code{"M"} and \code{"U"} for lower, middle and upper third. } \item{\code{phs}}{ post high school status: Enrolled in college, (\code{"C"}), enrolled in non-collegiate school, (\code{"N"}), employed full-time, (\code{"E"}) and other, (\code{"O"}). } \item{\code{fol}}{ father's occupational level, (seven levels, \code{"F1"}, \code{"F2"}, \dots, \code{"F7"}). } \item{\code{sex}}{ sex: factor with levels\code{"F"} or \code{"M"}. } \item{\code{f}}{ frequency. } } } \source{ From R. L. Plackett, (1974) \emph{The Analysis of Categorical Data.} London: Griffin who quotes the data from Hoyt, C. J., Krishnaiah, P. R. and Torrance, E. P. (1959) Analysis of complex contingency tables, \emph{J. Exp. Ed.} \bold{27}, 187--194. 
} \keyword{datasets} MASS/man/UScereal.Rd0000644000176000001440000000327711754562034013632 0ustar ripleyusers% file MASS/man/UScereal.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{UScereal} \alias{UScereal} \title{ Nutritional and Marketing Information on US Cereals } \description{ The \code{UScereal} data frame has 65 rows and 11 columns. The data come from the 1993 ASA Statistical Graphics Exposition, and are taken from the mandatory F&DA food label. The data have been normalized here to a portion of one American cup. } \usage{ UScereal } \format{ This data frame contains the following columns: \describe{ \item{\code{mfr}}{ Manufacturer, represented by its first initial: G=General Mills, K=Kelloggs, N=Nabisco, P=Post, Q=Quaker Oats, R=Ralston Purina. } \item{\code{calories}}{ number of calories in one portion. } \item{\code{protein}}{ grams of protein in one portion. } \item{\code{fat}}{ grams of fat in one portion. } \item{\code{sodium}}{ milligrams of sodium in one portion. } \item{\code{fibre}}{ grams of dietary fibre in one portion. } \item{\code{carbo}}{ grams of complex carbohydrates in one portion. } \item{\code{sugars}}{ grams of sugars in one portion. } \item{\code{shelf}}{ display shelf (1, 2, or 3, counting from the floor). } \item{\code{potassium}}{ grams of potassium. } \item{\code{vitamins}}{ vitamins and minerals (none, enriched, or 100\%). } } } \source{ The original data are available at \url{http://lib.stat.cmu.edu/datasets/1993.expo/}. } \references{ Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. } \keyword{datasets} MASS/man/Skye.Rd0000644000176000001440000000363511754562034013040 0ustar ripleyusers% file MASS/man/Skye.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{Skye} \alias{Skye} \title{ AFM Compositions of Aphyric Skye Lavas } \description{ The \code{Skye} data frame has 23 rows and 3 columns. 
} \usage{ Skye } \format{ This data frame contains the following columns: \describe{ \item{\code{A}}{ Percentage of sodium and potassium oxides. } \item{\code{F}}{ Percentage of iron oxide. } \item{\code{M}}{ Percentage of magnesium oxide. } } } \source{ R. N. Thompson, J. Esson and A. C. Duncan (1972) Major element chemical variation in the Eocene lavas of the Isle of Skye. \emph{J. Petrology}, \bold{13}, 219--253. } \references{ J. Aitchison (1986) \emph{The Statistical Analysis of Compositional Data.} Chapman and Hall, p.360. Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \examples{ # ternary() is from the on-line answers. ternary <- function(X, pch = par("pch"), lcex = 1, add = FALSE, ord = 1:3, \dots) { X <- as.matrix(X) if(any(X < 0)) stop("X must be non-negative") s <- drop(X \%*\% rep(1, ncol(X))) if(any(s<=0)) stop("each row of X must have a positive sum") if(max(abs(s-1)) > 1e-6) { warning("row(s) of X will be rescaled") X <- X / s } X <- X[, ord] s3 <- sqrt(1/3) if(!add) { oldpty <- par("pty") on.exit(par(pty=oldpty)) par(pty="s") plot(c(-s3, s3), c(0.5-s3, 0.5+s3), type="n", axes=FALSE, xlab="", ylab="") polygon(c(0, -s3, s3), c(1, 0, 0), density=0) lab <- NULL if(!is.null(dn <- dimnames(X))) lab <- dn[[2]] if(length(lab) < 3) lab <- as.character(1:3) eps <- 0.05 * lcex text(c(0, s3+eps*0.7, -s3-eps*0.7), c(1+eps, -0.1*eps, -0.1*eps), lab, cex=lcex) } points((X[,2] - X[,3])*s3, X[,1], \dots) } ternary(Skye/100, ord=c(1,3,2)) } \keyword{datasets} MASS/man/muscle.Rd0000644000176000001440000000475613577077353013434 0ustar ripleyusers% file MASS/man/muscle.Rd % copyright (C) 1999 W. N. Venables and B. D. Ripley % \name{muscle} \alias{muscle} \title{ Effect of Calcium Chloride on Muscle Contraction in Rat Hearts } \description{ The purpose of this experiment was to assess the influence of calcium in solution on the contraction of heart muscle in rats. 
The left auricle of 21 rat hearts was isolated and on several occasions a constant-length strip of tissue was electrically stimulated and dipped into various concentrations of calcium chloride solution, after which the shortening of the strip was accurately measured as the response. } \usage{ muscle } \format{ This data frame contains the following columns: \describe{ \item{\code{Strip}}{ which heart muscle strip was used? } \item{\code{Conc}}{ concentration of calcium chloride solution, in multiples of 2.2 mM. } \item{\code{Length}}{ the change in length (shortening) of the strip, (allegedly) in mm. } } } \source{ Linder, A., Chakravarti, I. M. and Vuagnat, P. (1964) Fitting asymptotic regression curves with different asymptotes. In \emph{Contributions to Statistics. Presented to Professor P. C. Mahalanobis on the occasion of his 70th birthday}, ed. C. R. Rao, pp. 221--228. Oxford: Pergamon Press. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth Edition. Springer. } \examples{ ## IGNORE_RDIFF_BEGIN A <- model.matrix(~ Strip - 1, data=muscle) rats.nls1 <- nls(log(Length) ~ cbind(A, rho^Conc), data = muscle, start = c(rho=0.1), algorithm="plinear") (B <- coef(rats.nls1)) st <- list(alpha = B[2:22], beta = B[23], rho = B[1]) (rats.nls2 <- nls(log(Length) ~ alpha[Strip] + beta*rho^Conc, data = muscle, start = st)) ## IGNORE_RDIFF_END Muscle <- with(muscle, { Muscle <- expand.grid(Conc = sort(unique(Conc)), Strip = levels(Strip)) Muscle$Yhat <- predict(rats.nls2, Muscle) Muscle <- cbind(Muscle, logLength = rep(as.numeric(NA), 126)) ind <- match(paste(Strip, Conc), paste(Muscle$Strip, Muscle$Conc)) Muscle$logLength[ind] <- log(Length) Muscle}) lattice::xyplot(Yhat ~ Conc | Strip, Muscle, as.table = TRUE, ylim = range(c(Muscle$Yhat, Muscle$logLength), na.rm = TRUE), subscripts = TRUE, xlab = "Calcium Chloride concentration (mM)", ylab = "log(Length in mm)", panel = function(x, y, subscripts, ...) 
{ panel.xyplot(x, Muscle$logLength[subscripts], ...) llines(spline(x, y)) }) } \keyword{datasets} MASS/man/Rubber.Rd0000644000176000001440000000156711754562034013350 0ustar ripleyusers% file MASS/man/Rubber.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{Rubber} \alias{Rubber} \title{ Accelerated Testing of Tyre Rubber } \description{ Data frame from accelerated testing of tyre rubber. } \usage{ Rubber } \format{ \describe{ \item{\code{loss}}{ the abrasion loss in gm/hr. } \item{\code{hard}}{ the hardness in Shore units. } \item{\code{tens}}{ tensile strength in kg/sq m. } } } \source{ O.L. Davies (1947) \emph{Statistical Methods in Research and Production.} Oliver and Boyd, Table 6.1 p. 119. O.L. Davies and P.L. Goldsmith (1972) \emph{Statistical Methods in Research and Production.} 4th edition, Longmans, Table 8.1 p. 239. } \references{ Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. } \keyword{datasets} MASS/man/plot.lda.Rd0000644000176000001440000000353711754562034013643 0ustar ripleyusers% file MASS/man/plot.lda.Rd % copyright (C) 1998-9 W. N. Venables and B. D. Ripley % \name{plot.lda} \alias{plot.lda} \title{ Plot Method for Class 'lda' } \description{ Plots a set of data on one, two or more linear discriminants. } \usage{ \method{plot}{lda}(x, panel = panel.lda, \dots, cex = 0.7, dimen, abbrev = FALSE, xlab = "LD1", ylab = "LD2") } \arguments{ \item{x}{ An object of class \code{"lda"}. } \item{panel}{ the panel function used to plot the data. } \item{\dots}{ additional arguments to \code{pairs}, \code{ldahist} or \code{eqscplot}. } \item{cex}{ graphics parameter \code{cex} for labels on plots. } \item{dimen}{ The number of linear discriminants to be used for the plot; if this exceeds the number determined by \code{x} the smaller value is used. } \item{abbrev}{ whether the group labels are abbreviated on the plots. 
If \code{abbrev > 0} this gives \code{minlength} in the call to \code{abbreviate}. } \item{xlab}{ label for the x axis } \item{ylab}{ label for the y axis }} \details{ This function is a method for the generic function \code{plot()} for class \code{"lda"}. It can be invoked by calling \code{plot(x)} for an object \code{x} of the appropriate class, or directly by calling \code{plot.lda(x)} regardless of the class of the object. The behaviour is determined by the value of \code{dimen}. For \code{dimen > 2}, a \code{pairs} plot is used. For \code{dimen = 2}, an equiscaled scatter plot is drawn. For \code{dimen = 1}, a set of histograms or density plots are drawn. Use argument \code{type} to match \code{"histogram"} or \code{"density"} or \code{"both"}. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{pairs.lda}}, \code{\link{ldahist}}, \code{\link{lda}}, \code{\link{predict.lda}} } \keyword{hplot} \keyword{multivariate} MASS/man/gamma.dispersion.Rd0000644000176000001440000000150411754562034015356 0ustar ripleyusers% file MASS/man/gamma.dispersion.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{gamma.dispersion} \alias{gamma.dispersion} \title{ Calculate the MLE of the Gamma Dispersion Parameter in a GLM Fit } \description{ A front end to \code{gamma.shape} for convenience. Finds the reciprocal of the estimate of the shape parameter only. } \usage{ gamma.dispersion(object, \dots) } \arguments{ \item{object}{ Fitted model object giving the gamma fit. } \item{\dots}{ Additional arguments passed on to \code{gamma.shape}. }} \value{ The MLE of the dispersion parameter of the gamma distribution. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{gamma.shape.glm}}, including the example on its help page. 
} \keyword{models} MASS/man/dropterm.Rd0000644000176000001440000000706711754562034013764 0ustar ripleyusers% file MASS/man/dropterm.Rd % copyright (C) 1998-9 W. N. Venables and B. D. Ripley % \name{dropterm} \alias{dropterm} \alias{dropterm.default} \alias{dropterm.glm} \alias{dropterm.lm} %\alias{dropterm.mlm} %\alias{dropterm.negbin} %\alias{dropterm.survreg} \title{ Try All One-Term Deletions from a Model } \description{ Try fitting all models that differ from the current model by dropping a single term, maintaining marginality. This function is generic; there exist methods for classes \code{lm} and \code{glm} and the default method will work for many other classes. } \usage{ dropterm (object, \dots) \method{dropterm}{default}(object, scope, scale = 0, test = c("none", "Chisq"), k = 2, sorted = FALSE, trace = FALSE, \dots) \method{dropterm}{lm}(object, scope, scale = 0, test = c("none", "Chisq", "F"), k = 2, sorted = FALSE, \dots) \method{dropterm}{glm}(object, scope, scale = 0, test = c("none", "Chisq", "F"), k = 2, sorted = FALSE, trace = FALSE, \dots) } \arguments{ \item{object}{ A object fitted by some model-fitting function. } \item{scope}{ a formula giving terms which might be dropped. By default, the model formula. Only terms that can be dropped and maintain marginality are actually tried. } \item{scale}{ used in the definition of the AIC statistic for selecting the models, currently only for \code{lm}, \code{aov} and \code{glm} models. Specifying \code{scale} asserts that the residual standard error or dispersion is known. } \item{test}{ should the results include a test statistic relative to the original model? The F test is only appropriate for \code{lm} and \code{aov} models, and perhaps for some over-dispersed \code{glm} models. The Chisq test can be an exact test (\code{lm} models with known scale) or a likelihood-ratio test depending on the method. } \item{k}{ the multiple of the number of degrees of freedom used for the penalty. 
Only \code{k = 2} gives the genuine AIC: \code{k = log(n)} is sometimes referred to as BIC or SBC. } \item{sorted}{ should the results be sorted on the value of AIC? } \item{trace}{ if \code{TRUE} additional information may be given on the fits as they are tried. } \item{\dots}{ arguments passed to or from other methods. }} \value{ A table of class \code{"anova"} containing at least columns for the change in degrees of freedom and AIC (or Cp) for the models. Some methods will give further information, for example sums of squares, deviances, log-likelihoods and test statistics. } \details{ The definition of AIC is only up to an additive constant: when appropriate (\code{lm} models with specified scale) the constant is taken to be that used in Mallows' Cp statistic and the results are labelled accordingly. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{addterm}}, \code{\link{stepAIC}} } \examples{ quine.hi <- aov(log(Days + 2.5) ~ .^4, quine) quine.nxt <- update(quine.hi, . ~ . - Eth:Sex:Age:Lrn) dropterm(quine.nxt, test= "F") quine.stp <- stepAIC(quine.nxt, scope = list(upper = ~Eth*Sex*Age*Lrn, lower = ~1), trace = FALSE) dropterm(quine.stp, test = "F") quine.3 <- update(quine.stp, . ~ . - Eth:Age:Lrn) dropterm(quine.3, test = "F") quine.4 <- update(quine.3, . ~ . - Eth:Age) dropterm(quine.4, test = "F") quine.5 <- update(quine.4, . ~ . - Age:Lrn) dropterm(quine.5, test = "F") house.glm0 <- glm(Freq ~ Infl*Type*Cont + Sat, family=poisson, data = housing) house.glm1 <- update(house.glm0, . ~ . + Sat*(Infl+Type+Cont)) dropterm(house.glm1, test = "Chisq") } \keyword{models} MASS/man/huber.Rd0000644000176000001440000000144211754562034013224 0ustar ripleyusers% file MASS/man/huber.Rd % copyright (C) 1994-9 W. N. Venables and B. D. 
Ripley % \name{huber} \alias{huber} \title{ Huber M-estimator of Location with MAD Scale } \description{ Finds the Huber M-estimator of location with MAD scale. } \usage{ huber(y, k = 1.5, tol = 1e-06) } \arguments{ \item{y}{ vector of data values } \item{k}{ Winsorizes at \code{k} standard deviations } \item{tol}{ convergence tolerance }} \value{ list of location and scale parameters \item{mu}{ location estimate } \item{s}{ MAD scale estimate }} \references{ Huber, P. J. (1981) \emph{Robust Statistics.} Wiley. Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{hubers}}, \code{\link{mad}} } \examples{ huber(chem) } \keyword{robust} MASS/man/shuttle.Rd0000644000176000001440000000277211754562034013616 0ustar ripleyusers% file MASS/man/shuttle.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{shuttle} \alias{shuttle} \title{ Space Shuttle Autolander Problem } \description{ The \code{shuttle} data frame has 256 rows and 7 columns. The first six columns are categorical variables giving example conditions; the seventh is the decision. The first 253 rows are the training set, the last 3 the test conditions. } \usage{ shuttle } \format{ This data frame contains the following factor columns: \describe{ \item{\code{stability}}{ stable positioning or not (\code{stab} / \code{xstab}). } \item{\code{error}}{ size of error (\code{MM} / \code{SS} / \code{LX} / \code{XL}). } \item{\code{sign}}{ sign of error, positive or negative (\code{pp} / \code{nn}). } \item{\code{wind}}{ wind sign (\code{head} / \code{tail}). } \item{\code{magn}}{ wind strength (\code{Light} / \code{Medium} / \code{Strong} / \code{Out of Range}). } \item{\code{vis}}{ visibility (\code{yes} / \code{no}). } \item{\code{use}}{ use the autolander or not. (\code{auto} / \code{noauto}.) } } } \source{ D. Michie (1989) Problems of computer-aided concept formation. In \emph{Applications of Expert Systems 2}, ed. J. R. 
Quinlan, Turing Institute Press / Addison-Wesley, pp. 310--333. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/survey.Rd0000644000176000001440000000400211754562034013447 0ustar ripleyusers% file MASS/man/survey.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{survey} \alias{survey} \title{ Student Survey Data } \description{ This data frame contains the responses of 237 Statistics I students at the University of Adelaide to a number of questions. } \usage{ survey } \format{ The components of the data frame are: \describe{ \item{\code{Sex}}{ The sex of the student. (Factor with levels \code{"Male"} and \code{"Female"}.) } \item{\code{Wr.Hnd}}{ span (distance from tip of thumb to tip of little finger of spread hand) of writing hand, in centimetres. } \item{\code{NW.Hnd}}{ span of non-writing hand. } \item{\code{W.Hnd}}{ writing hand of student. (Factor, with levels \code{"Left"} and \code{"Right"}.) } \item{\code{Fold}}{ \dQuote{Fold your arms! Which is on top} (Factor, with levels \code{"R on L"}, \code{"L on R"}, \code{"Neither"}.) } \item{\code{Pulse}}{ pulse rate of student (beats per minute). } \item{\code{Clap}}{ \sQuote{Clap your hands! Which hand is on top?} (Factor, with levels \code{"Right"}, \code{"Left"}, \code{"Neither"}.) } \item{\code{Exer}}{ how often the student exercises. (Factor, with levels \code{"Freq"} (frequently), \code{"Some"}, \code{"None"}.) } \item{\code{Smoke}}{ how much the student smokes. (Factor, levels \code{"Heavy"}, \code{"Regul"} (regularly), \code{"Occas"} (occasionally), \code{"Never"}.) } \item{\code{Height}}{ height of the student in centimetres. } \item{\code{M.I}}{ whether the student expressed height in imperial (feet/inches) or metric (centimetres/metres) units. (Factor, levels \code{"Metric"}, \code{"Imperial"}.) } \item{\code{Age}}{ age of the student in years. } } } \references{ Venables, W. N. 
and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. } \keyword{datasets} MASS/man/predict.lqs.Rd0000644000176000001440000000323611754562034014352 0ustar ripleyusers% file lqs/man//predict.lqs.Rd % copyright (C) 1999 B. D. Ripley % \name{predict.lqs} \alias{predict.lqs} \title{ Predict from an lqs Fit } \description{ Predict from a resistant regression fitted by \code{lqs}. } \usage{ \method{predict}{lqs}(object, newdata, na.action = na.pass, \dots) } \arguments{ \item{object}{ object inheriting from class \code{"lqs"} } \item{newdata}{ matrix or data frame of cases to be predicted or, if \code{object} has a formula, a data frame with columns of the same names as the variables used. A vector will be interpreted as a row vector. If \code{newdata} is missing, an attempt will be made to retrieve the data used to fit the \code{lqs} object. } \item{na.action}{function determining what should be done with missing values in \code{newdata}. The default is to predict \code{NA}.} \item{\dots}{arguments to be passed from or to other methods.} } \value{ A vector of predictions. } \details{ This function is a method for the generic function \code{predict()} for class \code{lqs}. It can be invoked by calling \code{predict(x)} for an object \code{x} of the appropriate class, or directly by calling \code{predict.lqs(x)} regardless of the class of the object. Missing values in \code{newdata} are handled by returning \code{NA} if the linear fit cannot be evaluated. If \code{newdata} is omitted and the \code{na.action} of the fit omitted cases, these will be omitted on the prediction. } \author{B.D. Ripley} \seealso{ \code{\link{lqs}} } \examples{ set.seed(123) fm <- lqs(stack.loss ~ ., data = stackloss, method = "S", nsamp = "exact") predict(fm, stackloss) } \keyword{models} MASS/man/housing.Rd0000644000176000001440000000650713577077106013600 0ustar ripleyusers% file MASS/man/housing.Rd % copyright (C) 1999 W. N. Venables and B. D.
Ripley % \name{housing} \alias{housing} \title{ Frequency Table from a Copenhagen Housing Conditions Survey } \description{ The \code{housing} data frame has 72 rows and 5 variables. } \usage{ housing } \format{ \describe{ \item{\code{Sat}}{ Satisfaction of householders with their present housing circumstances, (High, Medium or Low, ordered factor). } \item{\code{Infl}}{ Perceived degree of influence householders have on the management of the property (High, Medium, Low). } \item{\code{Type}}{ Type of rental accommodation, (Tower, Atrium, Apartment, Terrace). } \item{\code{Cont}}{ Contact residents are afforded with other residents, (Low, High). } \item{\code{Freq}}{ Frequencies: the numbers of residents in each class. } } } \source{ Madsen, M. (1976) Statistical analysis of multiple contingency tables. Two examples. \emph{Scand. J. Statist.} \bold{3}, 97--106. Cox, D. R. and Snell, E. J. (1984) \emph{Applied Statistics, Principles and Examples}. Chapman & Hall. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \examples{ options(contrasts = c("contr.treatment", "contr.poly")) # Surrogate Poisson models house.glm0 <- glm(Freq ~ Infl*Type*Cont + Sat, family = poisson, data = housing) ## IGNORE_RDIFF_BEGIN summary(house.glm0, cor = FALSE) ## IGNORE_RDIFF_END addterm(house.glm0, ~. + Sat:(Infl+Type+Cont), test = "Chisq") house.glm1 <- update(house.glm0, . ~ . + Sat*(Infl+Type+Cont)) summary(house.glm1, cor = FALSE) 1 - pchisq(deviance(house.glm1), house.glm1$df.residual) dropterm(house.glm1, test = "Chisq") addterm(house.glm1, ~. 
+ Sat:(Infl+Type+Cont)^2, test = "Chisq") hnames <- lapply(housing[, -5], levels) # omit Freq newData <- expand.grid(hnames) newData$Sat <- ordered(newData$Sat) house.pm <- predict(house.glm1, newData, type = "response") # poisson means house.pm <- matrix(house.pm, ncol = 3, byrow = TRUE, dimnames = list(NULL, hnames[[1]])) house.pr <- house.pm/drop(house.pm \%*\% rep(1, 3)) cbind(expand.grid(hnames[-1]), round(house.pr, 2)) # Iterative proportional scaling loglm(Freq ~ Infl*Type*Cont + Sat*(Infl+Type+Cont), data = housing) # multinomial model library(nnet) (house.mult<- multinom(Sat ~ Infl + Type + Cont, weights = Freq, data = housing)) house.mult2 <- multinom(Sat ~ Infl*Type*Cont, weights = Freq, data = housing) anova(house.mult, house.mult2) house.pm <- predict(house.mult, expand.grid(hnames[-1]), type = "probs") cbind(expand.grid(hnames[-1]), round(house.pm, 2)) # proportional odds model house.cpr <- apply(house.pr, 1, cumsum) logit <- function(x) log(x/(1-x)) house.ld <- logit(house.cpr[2, ]) - logit(house.cpr[1, ]) (ratio <- sort(drop(house.ld))) mean(ratio) (house.plr <- polr(Sat ~ Infl + Type + Cont, data = housing, weights = Freq)) house.pr1 <- predict(house.plr, expand.grid(hnames[-1]), type = "probs") cbind(expand.grid(hnames[-1]), round(house.pr1, 2)) Fr <- matrix(housing$Freq, ncol = 3, byrow = TRUE) 2*sum(Fr*log(house.pr/house.pr1)) house.plr2 <- stepAIC(house.plr, ~.^2) house.plr2$anova } \keyword{datasets} MASS/man/predict.glmmPQL.Rd0000644000176000001440000000337111754562034015064 0ustar ripleyusers\name{predict.glmmPQL} \alias{predict.glmmPQL} \title{Predict Method for glmmPQL Fits} \description{ Obtains predictions from a fitted generalized linear model with random effects. } \usage{ \method{predict}{glmmPQL}(object, newdata = NULL, type = c("link", "response"), level, na.action = na.pass, ...) 
} \arguments{ \item{object}{a fitted object of class inheriting from \code{"glmmPQL"}.} \item{newdata}{optionally, a data frame in which to look for variables with which to predict.} \item{type}{the type of prediction required. The default is on the scale of the linear predictors; the alternative \code{"response"} is on the scale of the response variable. Thus for a default binomial model the default predictions are of log-odds (probabilities on logit scale) and \code{type = "response"} gives the predicted probabilities.} \item{level}{an optional integer vector giving the level(s) of grouping to be used in obtaining the predictions. Level values increase from outermost to innermost grouping, with level zero corresponding to the population predictions. Defaults to the highest or innermost level of grouping.} \item{na.action}{function determining what should be done with missing values in \code{newdata}. The default is to predict \code{NA}.} \item{\dots}{further arguments passed to or from other methods.} } \value{ If \code{level} is a single integer, a vector otherwise a data frame. } \seealso{ \code{\link{glmmPQL}}, \code{\link[nlme]{predict.lme}}. } \examples{ fit <- glmmPQL(y ~ trt + I(week > 2), random = ~1 | ID, family = binomial, data = bacteria) predict(fit, bacteria, level = 0, type="response") predict(fit, bacteria, level = 1, type="response") } \keyword{models} MASS/man/dose.p.Rd0000644000176000001440000000221711754562034013310 0ustar ripleyusers\name{dose.p} \alias{dose.p} \alias{print.glm.dose} \title{ Predict Doses for Binomial Assay model } \description{ Calibrate binomial assays, generalizing the calculation of LD50. } \usage{ dose.p(obj, cf = 1:2, p = 0.5) } \arguments{ \item{obj}{ A fitted model object of class inheriting from \code{"glm"}. } \item{cf}{ The terms in the coefficient vector giving the intercept and coefficient of (log-)dose } \item{p}{ Probabilities at which to predict the dose needed. 
}} \value{ An object of class \code{"glm.dose"} giving the prediction (attribute \code{"p"}) and standard error (attribute \code{"SE"}) at each response probability. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Springer. } \examples{ ldose <- rep(0:5, 2) numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16) sex <- factor(rep(c("M", "F"), c(6, 6))) SF <- cbind(numdead, numalive = 20 - numdead) budworm.lg0 <- glm(SF ~ sex + ldose - 1, family = binomial) dose.p(budworm.lg0, cf = c(1,3), p = 1:3/4) dose.p(update(budworm.lg0, family = binomial(link=probit)), cf = c(1,3), p = 1:3/4) } \keyword{regression} \keyword{models} MASS/man/crabs.Rd0000644000176000001440000000254111754562034013212 0ustar ripleyusers% file MASS/man/crabs.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{crabs} \alias{crabs} \title{ Morphological Measurements on Leptograpsus Crabs } \description{ The \code{crabs} data frame has 200 rows and 8 columns, describing 5 morphological measurements on 50 crabs each of two colour forms and both sexes, of the species \emph{Leptograpsus variegatus} collected at Fremantle, W. Australia. } \usage{ crabs } \format{ This data frame contains the following columns: \describe{ \item{\code{sp}}{ \code{species} - \code{"B"} or \code{"O"} for blue or orange. } \item{\code{sex}}{ as it says. } \item{\code{index}}{ index \code{1:50} within each of the four groups. } \item{\code{FL}}{ frontal lobe size (mm). } \item{\code{RW}}{ rear width (mm). } \item{\code{CL}}{ carapace length (mm). } \item{\code{CW}}{ carapace width (mm). } \item{\code{BD}}{ body depth (mm). } } } \source{ Campbell, N.A. and Mahon, R.J. (1974) A multivariate study of variation in two species of rock crab of genus \emph{Leptograpsus.} \emph{Australian Journal of Zoology} \bold{22}, 417--425. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer.
} \keyword{datasets} MASS/man/phones.Rd0000644000176000001440000000131611754562034013413 0ustar ripleyusers% file MASS/man/phones.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{Belgian-phones} \alias{phones} \title{ Belgium Phone Calls 1950-1973 } \usage{ phones } \description{ A list object with the annual numbers of telephone calls, in Belgium. The components are: \describe{ \item{\code{year}}{ last two digits of the year. } \item{\code{calls}}{ number of telephone calls made (in millions of calls). } } } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression & Outlier Detection.} Wiley. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/lqs.Rd0000644000176000001440000001607113577077213012727 0ustar ripleyusers% file lqs/man/lqs.Rd % copyright (C) 1998-9, 2007 B. D. Ripley (C) 2001-2 R Core Development Team % \name{lqs} \alias{lqs} \alias{lqs.formula} \alias{lqs.default} \alias{lmsreg} \alias{ltsreg} %\alias{print.lqs} \title{ Resistant Regression } \description{ Fit a regression to the \emph{good} points in the dataset, thereby achieving a regression estimator with a high breakdown point. \code{lmsreg} and \code{ltsreg} are compatibility wrappers. } \usage{ lqs(x, \dots) \method{lqs}{formula}(formula, data, \dots, method = c("lts", "lqs", "lms", "S", "model.frame"), subset, na.action, model = TRUE, x.ret = FALSE, y.ret = FALSE, contrasts = NULL) \method{lqs}{default}(x, y, intercept = TRUE, method = c("lts", "lqs", "lms", "S"), quantile, control = lqs.control(\dots), k0 = 1.548, seed, \dots) lmsreg(\dots) ltsreg(\dots) } \arguments{ \item{formula}{a formula of the form \code{y ~ x1 + x2 + \dots}.} \item{data}{data frame from which variables specified in \code{formula} are preferentially to be taken.} \item{subset}{an index vector specifying the cases to be used in fitting. 
(NOTE: If given, this argument must be named exactly.)} \item{na.action}{function to specify the action to be taken if \code{NA}s are found. The default action is for the procedure to fail. Alternatives include \code{\link{na.omit}} and \code{\link{na.exclude}}, which lead to omission of cases with missing values on any required variable. (NOTE: If given, this argument must be named exactly.) } \item{model, x.ret, y.ret}{logical. If \code{TRUE} the model frame, the model matrix and the response are returned, respectively.} \item{contrasts}{an optional list. See the \code{contrasts.arg} of \code{\link{model.matrix.default}}.} \item{x}{a matrix or data frame containing the explanatory variables.} \item{y}{the response: a vector of length the number of rows of \code{x}.} \item{intercept}{should the model include an intercept?} \item{method}{ the method to be used. \code{model.frame} returns the model frame: for the others see the \code{Details} section. Using \code{lmsreg} or \code{ltsreg} forces \code{"lms"} and \code{"lts"} respectively. } \item{quantile}{ the quantile to be used: see \code{Details}. This is over-ridden if \code{method = "lms"}. } \item{control}{additional control items: see \code{Details}.} \item{k0}{the cutoff / tuning constant used for \eqn{\chi()}{chi()} and \eqn{\psi()}{psi()} functions when \code{method = "S"}, currently corresponding to Tukey's \sQuote{biweight}.} \item{seed}{ the seed to be used for random sampling: see \code{.Random.seed}. The current value of \code{.Random.seed} will be preserved if it is set. } \item{\dots}{arguments to be passed to \code{lqs.default} or \code{lqs.control}, see \code{control} above and \code{Details}.} } \value{ An object of class \code{"lqs"}. This is a list with components \item{crit}{the value of the criterion for the best solution found, in the case of \code{method == "S"} before IWLS refinement.} \item{sing}{character.
A message about the number of samples which resulted in singular fits.} \item{coefficients}{of the fitted linear model} \item{bestone}{the indices of those points fitted by the best sample found (prior to adjustment of the intercept, if requested).} \item{fitted.values}{the fitted values.} \item{residuals}{the residuals.} \item{scale}{estimate(s) of the scale of the error. The first is based on the fit criterion. The second (not present for \code{method == "S"}) is based on the variance of those residuals whose absolute value is less than 2.5 times the initial estimate.} } \details{ Suppose there are \code{n} data points and \code{p} regressors, including any intercept. The first three methods minimize some function of the sorted squared residuals. For methods \code{"lqs"} and \code{"lms"} it is the \code{quantile} squared residual, and for \code{"lts"} it is the sum of the \code{quantile} smallest squared residuals. \code{"lqs"} and \code{"lms"} differ in the defaults for \code{quantile}, which are \code{floor((n+p+1)/2)} and \code{floor((n+1)/2)} respectively. For \code{"lts"} the default is \code{floor(n/2) + floor((p+1)/2)}. The \code{"S"} estimation method solves for the scale \code{s} such that the average of a function chi of the residuals divided by \code{s} is equal to a given constant. The \code{control} argument is a list with components \describe{ \item{\code{psamp}:}{the size of each sample. Defaults to \code{p}.} \item{\code{nsamp}:}{the number of samples or \code{"best"} (the default) or \code{"exact"} or \code{"sample"}. If \code{"sample"} the number chosen is \code{min(5*p, 3000)}, taken from Rousseeuw and Hubert (1997). If \code{"best"} exhaustive enumeration is done up to 5000 samples; if \code{"exact"} exhaustive enumeration will be attempted however many samples are needed.} \item{\code{adjust}:}{should the intercept be optimized for each sample?
Defaults to \code{TRUE}.} } } \note{ There seems no reason other than historical to use the \code{lms} and \code{lqs} options. LMS estimation is of low efficiency (converging at rate \eqn{n^{-1/3}}) whereas LTS has the same asymptotic efficiency as an M estimator with trimming at the quartiles (Marazzi, 1993, p.201). LQS and LTS have the same maximal breakdown value of \code{(floor((n-p)/2) + 1)/n} attained if \code{floor((n+p)/2) <= quantile <= floor((n+p+1)/2)}. The only drawback mentioned of LTS is greater computation, as a sort was thought to be required (Marazzi, 1993, p.201) but this is not true as a partial sort can be used (and is used in this implementation). Adjusting the intercept for each trial fit does need the residuals to be sorted, and may be significant extra computation if \code{n} is large and \code{p} small. Opinions differ over the choice of \code{psamp}. Rousseeuw and Hubert (1997) only consider p; Marazzi (1993) recommends p+1 and suggests that more samples are better than adjustment for a given computational limit. The computations are exact for a model with just an intercept and adjustment, and for LQS for a model with an intercept plus one regressor and exhaustive search with adjustment. For all other cases the minimization is only known to be approximate. } \references{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection.} Wiley. A. Marazzi (1993) \emph{Algorithms, Routines and S Functions for Robust Statistics.} Wadsworth and Brooks/Cole. P. Rousseeuw and M. Hubert (1997) Recent developments in PROGRESS. In \emph{L1-Statistical Procedures and Related Topics}, ed Y. Dodge, IMS Lecture Notes volume \bold{31}, pp. 201--214. 
} \seealso{ \code{\link{predict.lqs}} } \examples{ ## IGNORE_RDIFF_BEGIN set.seed(123) # make reproducible lqs(stack.loss ~ ., data = stackloss) lqs(stack.loss ~ ., data = stackloss, method = "S", nsamp = "exact") ## IGNORE_RDIFF_END } \keyword{models} \keyword{robust} MASS/man/lda.Rd0000644000176000001440000001317511754562034012665 0ustar ripleyusers% file MASS/man/lda.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{lda} \alias{lda} \alias{lda.default} \alias{lda.data.frame} \alias{lda.formula} \alias{lda.matrix} \alias{model.frame.lda} \alias{print.lda} \alias{coef.lda} \title{ Linear Discriminant Analysis } \description{ Linear discriminant analysis. } \usage{ lda(x, \dots) \method{lda}{formula}(formula, data, \dots, subset, na.action) \method{lda}{default}(x, grouping, prior = proportions, tol = 1.0e-4, method, CV = FALSE, nu, \dots) \method{lda}{data.frame}(x, \dots) \method{lda}{matrix}(x, grouping, \dots, subset, na.action) } \arguments{ \item{formula}{ A formula of the form \code{groups ~ x1 + x2 + \dots} That is, the response is the grouping factor and the right hand side specifies the (non-factor) discriminators. } \item{data}{ Data frame from which variables specified in \code{formula} are preferentially to be taken. } \item{x}{ (required if no formula is given as the principal argument.) a matrix or data frame or Matrix containing the explanatory variables. } \item{grouping}{ (required if no formula principal argument is given.) a factor specifying the class for each observation. } \item{prior}{ the prior probabilities of class membership. If unspecified, the class proportions for the training set are used. If present, the probabilities should be specified in the order of the factor levels. } \item{tol}{ A tolerance to decide if a matrix is singular; it will reject variables and linear combinations of unit-variance variables whose variance is less than \code{tol^2}. 
} \item{subset}{ An index vector specifying the cases to be used in the training sample. (NOTE: If given, this argument must be named.) } \item{na.action}{ A function to specify the action to be taken if \code{NA}s are found. The default action is for the procedure to fail. An alternative is \code{na.omit}, which leads to rejection of cases with missing values on any required variable. (NOTE: If given, this argument must be named.) } \item{method}{ \code{"moment"} for standard estimators of the mean and variance, \code{"mle"} for MLEs, \code{"mve"} to use \code{\link{cov.mve}}, or \code{"t"} for robust estimates based on a \eqn{t} distribution. } \item{CV}{ If true, returns results (classes and posterior probabilities) for leave-one-out cross-validation. Note that if the prior is estimated, the proportions in the whole dataset are used. } \item{nu}{ degrees of freedom for \code{method = "t"}. } \item{\dots}{ arguments passed to or from other methods. }} \value{ If \code{CV = TRUE} the return value is a list with components \code{class}, the MAP classification (a factor), and \code{posterior}, posterior probabilities for the classes. Otherwise it is an object of class \code{"lda"} containing the following components: \item{prior}{ the prior probabilities used. } \item{means}{ the group means. } \item{scaling}{ a matrix which transforms observations to discriminant functions, normalized so that within groups covariance matrix is spherical. } \item{svd}{ the singular values, which give the ratio of the between- and within-group standard deviations on the linear discriminant variables. Their squares are the canonical F-statistics. } \item{N}{ The number of observations used. } \item{call}{ The (matched) function call. } } \details{ The function tries hard to detect if the within-class covariance matrix is singular. If any variable has within-group variance less than \code{tol^2} it will stop and report the variable as constant. 
This could result from poor scaling of the problem, but is more likely to result from constant variables. Specifying the \code{prior} will affect the classification unless over-ridden in \code{predict.lda}. Unlike in most statistical packages, it will also affect the rotation of the linear discriminants within their space, as a weighted between-groups covariance matrix is used. Thus the first few linear discriminants emphasize the differences between groups with the weights given by the prior, which may differ from their prevalence in the dataset. If one or more groups is missing in the supplied data, they are dropped with a warning, but the classifications produced are with respect to the original set of levels. } \note{ This function may be called giving either a formula and optional data frame, or a matrix and grouping factor as the first two arguments. All other arguments are optional, but \code{subset=} and \code{na.action=}, if required, must be fully named. If a formula is given as the principal argument the object may be modified using \code{update()} in the usual way. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. Ripley, B. D. (1996) \emph{Pattern Recognition and Neural Networks}. Cambridge University Press. } \seealso{ \code{\link{predict.lda}}, \code{\link{qda}}, \code{\link{predict.qda}} } \examples{ Iris <- data.frame(rbind(iris3[,,1], iris3[,,2], iris3[,,3]), Sp = rep(c("s","c","v"), rep(50,3))) train <- sample(1:150, 75) table(Iris$Sp[train]) ## your answer may differ ## c s v ## 22 23 30 z <- lda(Sp ~ ., Iris, prior = c(1,1,1)/3, subset = train) predict(z, Iris[-train, ])$class ## [1] s s s s s s s s s s s s s s s s s s s s s s s s s s s c c c ## [31] c c c c c c c v c c c c v c c c c c c c c c c c c v v v v v ## [61] v v v v v v v v v v v v v v v (z1 <- update(z, . ~ . 
- Petal.W.)) } \keyword{multivariate} MASS/man/biopsy.Rd0000644000176000001440000000450711754562034013431 0ustar ripleyusers% file MASS/man/biopsy.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{biopsy} \alias{biopsy} \title{ Biopsy Data on Breast Cancer Patients } \description{ This breast cancer database was obtained from the University of Wisconsin Hospitals, Madison from Dr. William H. Wolberg. He assessed biopsies of breast tumours for 699 patients up to 15 July 1992; each of nine attributes has been scored on a scale of 1 to 10, and the outcome is also known. There are 699 rows and 11 columns. } \usage{ biopsy } \format{ This data frame contains the following columns: \describe{ \item{\code{ID}}{sample code number (not unique).} \item{\code{V1}}{clump thickness.} \item{\code{V2}}{uniformity of cell size.} \item{\code{V3}}{uniformity of cell shape.} \item{\code{V4}}{marginal adhesion.} \item{\code{V5}}{single epithelial cell size.} \item{\code{V6}}{bare nuclei (16 values are missing).} \item{\code{V7}}{bland chromatin.} \item{\code{V8}}{normal nucleoli.} \item{\code{V9}}{mitoses.} \item{\code{class}}{\code{"benign"} or \code{"malignant"}.} } } \source{ P. M. Murphy and D. W. Aha (1992). UCI Repository of machine learning databases. [Machine-readable data repository]. Irvine, CA: University of California, Department of Information and Computer Science. O. L. Mangasarian and W. H. Wolberg (1990) Cancer diagnosis via linear programming. \emph{SIAM News} \bold{23}, pp 1 & 18. William H. Wolberg and O.L. Mangasarian (1990) Multisurface method of pattern separation for medical diagnosis applied to breast cytology. \emph{Proceedings of the National Academy of Sciences, U.S.A.} \bold{87}, pp. 9193--9196. O. L. Mangasarian, R. Setiono and W.H. Wolberg (1990) Pattern recognition via linear programming: Theory and application to medical diagnosis. In \emph{Large-scale Numerical Optimization} eds Thomas F. 
Coleman and Yuying Li, SIAM Publications, Philadelphia, pp 22--30. K. P. Bennett and O. L. Mangasarian (1992) Robust linear programming discrimination of two linearly inseparable sets. \emph{Optimization Methods and Software} \bold{1}, pp. 23--34 (Gordon & Breach Science Publishers). } \references{ Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. } \keyword{datasets} MASS/man/fgl.Rd0000644000176000001440000000255211754562034012672 0ustar ripleyusers% file MASS/man/fgl.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{fgl} \alias{fgl} \title{ Measurements of Forensic Glass Fragments } \description{ The \code{fgl} data frame has 214 rows and 10 columns. It was collected by B. German on fragments of glass collected in forensic work. } \usage{ fgl } \format{ This data frame contains the following columns: \describe{ \item{\code{RI}}{ refractive index; more precisely the refractive index is 1.518xxxx. The next 8 measurements are percentages by weight of oxides. } \item{\code{Na}}{sodium.} \item{\code{Mg}}{magnesium.} \item{\code{Al}}{aluminium.} \item{\code{Si}}{silicon.} \item{\code{K}}{potassium.} \item{\code{Ca}}{calcium.} \item{\code{Ba}}{barium.} \item{\code{Fe}}{iron.} \item{\code{type}}{ The fragments were originally classed into seven types, one of which was absent in this dataset. The categories which occur are window float glass (\code{WinF}: 70), window non-float glass (\code{WinNF}: 76), vehicle window glass (\code{Veh}: 17), containers (\code{Con}: 13), tableware (\code{Tabl}: 9) and vehicle headlamps (\code{Head}: 29). } } } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/topo.Rd0000644000176000001440000000131311754562034013075 0ustar ripleyusers% file MASS/man/topo.Rd % copyright (C) 1994-9 W. N. Venables and B. D.
Ripley % \name{topo} \alias{topo} \title{ Spatial Topographic Data } \description{ The \code{topo} data frame has 52 rows and 3 columns, of topographic heights within a 310 feet square. } \usage{ topo } \format{ This data frame contains the following columns: \describe{ \item{\code{x}}{ x coordinates (units of 50 feet) } \item{\code{y}}{ y coordinates (units of 50 feet) } \item{\code{z}}{ heights (feet) }}} \source{ Davis, J.C. (1973) \emph{Statistics and Data Analysis in Geology.} Wiley. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/wtloss.Rd0000644000176000001440000000174213577077500013461 0ustar ripleyusers% file MASS/man/wtloss.Rd % copyright (C) 1994-2011 W. N. Venables and B. D. Ripley % \name{wtloss} \alias{wtloss} \title{ Weight Loss Data from an Obese Patient } \description{ The data frame gives the weight, in kilograms, of an obese patient at 52 time points over an 8 month period of a weight rehabilitation programme. } \usage{ wtloss } \format{ This data frame contains the following columns: \describe{ \item{\code{Days}}{ time in days since the start of the programme. } \item{\code{Weight}}{ weight in kilograms of the patient. } } } \source{ Dr T. Davies, Adelaide. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \examples{ ## IGNORE_RDIFF_BEGIN wtloss.fm <- nls(Weight ~ b0 + b1*2^(-Days/th), data = wtloss, start = list(b0=90, b1=95, th=120)) wtloss.fm ## IGNORE_RDIFF_END plot(wtloss) with(wtloss, lines(Days, fitted(wtloss.fm))) } \keyword{datasets} MASS/man/confint.Rd0000644000176000001440000000571111754562034013562 0ustar ripleyusers% file MASS/man/confint.Rd % copyright (C) 1999-2010 W. N. Venables and B. D. 
Ripley % \name{confint-MASS} \alias{confint.glm} \alias{confint.nls} \alias{confint.profile.glm} \alias{confint.profile.nls} %\alias{profile.glm} \title{ Confidence Intervals for Model Parameters } \description{ Computes confidence intervals for one or more parameters in a fitted model. Package \pkg{MASS} adds methods for \code{glm} and \code{nls} fits. } \usage{ \method{confint}{glm}(object, parm, level = 0.95, trace = FALSE, \dots) \method{confint}{nls}(object, parm, level = 0.95, \dots) } \arguments{ \item{object}{ a fitted model object. Methods currently exist for the classes \code{"glm"}, \code{"nls"} and for profile objects from these classes. } \item{parm}{ a specification of which parameters are to be given confidence intervals, either a vector of numbers or a vector of names. If missing, all parameters are considered. } \item{level}{ the confidence level required. } \item{trace}{ logical. Should profiling be traced? } \item{\dots}{ additional argument(s) for methods. } } \value{ A matrix (or vector) with columns giving lower and upper confidence limits for each parameter. These will be labelled as (1 - level)/2 and 1 - (1 - level)/2 in \% (by default 2.5\% and 97.5\%). } \details{ \code{\link[stats]{confint}} is a generic function in package \code{stats}. These \code{confint} methods call the appropriate profile method, then find the confidence intervals by interpolation in the profile traces. If the profile object is already available it should be used as the main argument rather than the fitted model object itself. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. 
} \seealso{ \code{\link[stats]{confint}} (the generic and \code{"lm"} method), \code{\link{profile}} } \examples{ expn1 <- deriv(y ~ b0 + b1 * 2^(-x/th), c("b0", "b1", "th"), function(b0, b1, th, x) {}) wtloss.gr <- nls(Weight ~ expn1(b0, b1, th, Days), data = wtloss, start = c(b0=90, b1=95, th=120)) expn2 <- deriv(~b0 + b1*((w0 - b0)/b1)^(x/d0), c("b0","b1","d0"), function(b0, b1, d0, x, w0) {}) wtloss.init <- function(obj, w0) { p <- coef(obj) d0 <- - log((w0 - p["b0"])/p["b1"])/log(2) * p["th"] c(p[c("b0", "b1")], d0 = as.vector(d0)) } out <- NULL w0s <- c(110, 100, 90) for(w0 in w0s) { fm <- nls(Weight ~ expn2(b0, b1, d0, Days, w0), wtloss, start = wtloss.init(wtloss.gr, w0)) out <- rbind(out, c(coef(fm)["d0"], confint(fm, "d0"))) } dimnames(out) <- list(paste(w0s, "kg:"), c("d0", "low", "high")) out ldose <- rep(0:5, 2) numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16) sex <- factor(rep(c("M", "F"), c(6, 6))) SF <- cbind(numdead, numalive = 20 - numdead) budworm.lg0 <- glm(SF ~ sex + ldose - 1, family = binomial) confint(budworm.lg0) confint(budworm.lg0, "ldose") } \keyword{models} MASS/man/genotype.Rd0000644000176000001440000000230411754562034013747 0ustar ripleyusers% file MASS/man/genotype.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{genotype} \alias{genotype} \title{ Rat Genotype Data } \description{ Data from a foster feeding experiment with rat mothers and litters of four different genotypes: \code{A}, \code{B}, \code{I} and \code{J}. Rat litters were separated from their natural mothers at birth and given to foster mothers to rear. } \usage{ genotype } \format{ The data frame has the following components: \describe{ \item{\code{Litter}}{ genotype of the litter. } \item{\code{Mother}}{ genotype of the foster mother. } \item{\code{Wt}}{ Litter average weight gain of the litter, in grams at age 28 days. (The source states that the within-litter variability is negligible.) } } } \source{ Scheffe, H. 
(1959) \emph{The Analysis of Variance} Wiley p. 140. Bailey, D. W. (1953) \emph{The Inheritance of Maternal Influences on the Growth of the Rat.} Unpublished Ph.D. thesis, University of California. Table B of the Appendix. } \references{ Venables, W. N. and Ripley, B. D. (1999) \emph{Modern Applied Statistics with S-PLUS.} Third Edition. Springer. } \keyword{datasets} MASS/man/boxcox.Rd0000644000176000001440000000477711754562034013437 0ustar ripleyusers% file MASS/man/boxcox.Rd % copyright (C) 1994-2005 W. N. Venables and B. D. Ripley % \name{boxcox} \alias{boxcox} \alias{boxcox.default} \alias{boxcox.formula} \alias{boxcox.lm} \title{ Box-Cox Transformations for Linear Models } \description{ Computes and optionally plots profile log-likelihoods for the parameter of the Box-Cox power transformation. } \usage{ boxcox(object, \dots) \method{boxcox}{default}(object, lambda = seq(-2, 2, 1/10), plotit = TRUE, interp, eps = 1/50, xlab = expression(lambda), ylab = "log-Likelihood", \dots) \method{boxcox}{formula}(object, lambda = seq(-2, 2, 1/10), plotit = TRUE, interp, eps = 1/50, xlab = expression(lambda), ylab = "log-Likelihood", \dots) \method{boxcox}{lm}(object, lambda = seq(-2, 2, 1/10), plotit = TRUE, interp, eps = 1/50, xlab = expression(lambda), ylab = "log-Likelihood", \dots) } \arguments{ \item{object}{a formula or fitted model object. Currently only \code{lm} and \code{aov} objects are handled.} \item{lambda}{vector of values of \code{lambda} -- default \eqn{(-2, 2)} in steps of 0.1.} \item{plotit}{logical which controls whether the result should be plotted.} \item{interp}{logical which controls whether spline interpolation is used. 
Default to \code{TRUE} if plotting with \code{lambda} of length less than 100.} \item{eps}{Tolerance for \code{lambda = 0}; defaults to 0.02.} \item{xlab}{defaults to \code{"lambda"}.} \item{ylab}{defaults to \code{"log-Likelihood"}.} \item{\dots}{additional parameters to be used in the model fitting.} } \value{ A list of the \code{lambda} vector and the computed profile log-likelihood vector, invisibly if the result is plotted. } \section{Side Effects}{ If \code{plotit = TRUE} plots log-likelihood \emph{vs} \code{lambda} and indicates a 95\% confidence interval about the maximum observed value of \code{lambda}. If \code{interp = TRUE}, spline interpolation is used to give a smoother plot. } \references{ Box, G. E. P. and Cox, D. R. (1964) An analysis of transformations (with discussion). \emph{Journal of the Royal Statistical Society B}, \bold{26}, 211--252. Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \examples{ boxcox(Volume ~ log(Height) + log(Girth), data = trees, lambda = seq(-0.25, 0.25, length = 10)) boxcox(Days+1 ~ Eth*Sex*Age*Lrn, data = quine, lambda = seq(-0.05, 0.45, len = 20)) } \keyword{regression} \keyword{models} \keyword{hplot} MASS/man/lm.gls.Rd0000644000176000001440000000347711754562034013325 0ustar ripleyusers% file MASS/man/lm.gls.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{lm.gls} \alias{lm.gls} \title{ Fit Linear Models by Generalized Least Squares } \description{ Fit linear models by Generalized Least Squares } \usage{ lm.gls(formula, data, W, subset, na.action, inverse = FALSE, method = "qr", model = FALSE, x = FALSE, y = FALSE, contrasts = NULL, \dots) } \arguments{ \item{formula}{ a formula expression as for regression models, of the form \code{response ~ predictors}. See the documentation of \code{formula} for other details. } \item{data}{ an optional data frame in which to interpret the variables occurring in \code{formula}. 
} \item{W}{ a weight matrix. } \item{subset}{ expression saying which subset of the rows of the data should be used in the fit. All observations are included by default. } \item{na.action}{ a function to filter missing data. } \item{inverse}{ logical: if true \code{W} specifies the inverse of the weight matrix: this is appropriate if a variance matrix is used. } \item{method}{ method to be used by \code{lm.fit}. } \item{model}{ should the model frame be returned? } \item{x}{ should the design matrix be returned? } \item{y}{ should the response be returned? } \item{contrasts}{ a list of contrasts to be used for some or all of } \item{\dots}{ additional arguments to \code{\link{lm.fit}}. }} \value{ An object of class \code{"lm.gls"}, which is similar to an \code{"lm"} object. There is no \code{"weights"} component, and only a few \code{"lm"} methods will work correctly. As from version 7.1-22 the residuals and fitted values refer to the untransformed problem. } \details{ The problem is transformed to uncorrelated form and passed to \code{\link{lm.fit}}. } \seealso{ \code{\link[nlme]{gls}}, \code{\link{lm}}, \code{\link{lm.ridge}} } \keyword{models} MASS/man/fitdistr.Rd0000644000176000001440000001037613243747121013752 0ustar ripleyusers% file MASS/man/fitdistr.Rd % copyright (C) 2001-11 W. N. Venables and B. D. Ripley % \name{fitdistr} \alias{fitdistr} %\alias{print.fitdistr} %\alias{coef.fitdistr} %\alias{logLik.fitdistr} \title{ Maximum-likelihood Fitting of Univariate Distributions } \description{ Maximum-likelihood fitting of univariate distributions, allowing parameters to be held fixed if desired. } \usage{ fitdistr(x, densfun, start, \dots) } \arguments{ \item{x}{ A numeric vector of length at least one containing only \link{finite} values. } \item{densfun}{ Either a character string or a function returning a density evaluated at its first argument. 
Distributions \code{"beta"}, \code{"cauchy"}, \code{"chi-squared"}, \code{"exponential"}, \code{"gamma"}, \code{"geometric"}, \code{"log-normal"}, \code{"lognormal"}, \code{"logistic"}, \code{"negative binomial"}, \code{"normal"}, \code{"Poisson"}, \code{"t"} and \code{"weibull"} are recognised, case being ignored. } \item{start}{ A named list giving the parameters to be optimized with initial values. This can be omitted for some of the named distributions and must be for others (see Details). } \item{\dots}{ Additional parameters, either for \code{densfun} or for \code{optim}. In particular, it can be used to specify bounds via \code{lower} or \code{upper} or both. If arguments of \code{densfun} (or the density function corresponding to a character-string specification) are included they will be held fixed. }} \value{ An object of class \code{"fitdistr"}, a list with four components, \item{estimate}{the parameter estimates,} \item{sd}{the estimated standard errors,} \item{vcov}{the estimated variance-covariance matrix, and} \item{loglik}{the log-likelihood.} } \details{ For the Normal, log-Normal, geometric, exponential and Poisson distributions the closed-form MLEs (and exact standard errors) are used, and \code{start} should not be supplied. For all other distributions, direct optimization of the log-likelihood is performed using \code{\link{optim}}. The estimated standard errors are taken from the observed information matrix, calculated by a numerical approximation. For one-dimensional problems the Nelder-Mead method is used and for multi-dimensional problems the BFGS method, unless arguments named \code{lower} or \code{upper} are supplied (when \code{L-BFGS-B} is used) or \code{method} is supplied explicitly. For the \code{"t"} named distribution the density is taken to be the location-scale family with location \code{m} and scale \code{s}. 
For the following named distributions, reasonable starting values will be computed if \code{start} is omitted or only partially specified: \code{"cauchy"}, \code{"gamma"}, \code{"logistic"}, \code{"negative binomial"} (parametrized by \code{mu} and \code{size}), \code{"t"} and \code{"weibull"}. Note that these starting values may not be good enough if the fit is poor: in particular they are not resistant to outliers unless the fitted distribution is long-tailed. There are \code{\link{print}}, \code{\link{coef}}, \code{\link{vcov}} and \code{\link{logLik}} methods for class \code{"fitdistr"}. } \note{ Numerical optimization cannot work miracles: please note the comments in \code{\link{optim}} on scaling data. If the fitted parameters are far away from one, consider re-fitting specifying the control parameter \code{parscale}. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \examples{ ## avoid spurious accuracy op <- options(digits = 3) set.seed(123) x <- rgamma(100, shape = 5, rate = 0.1) fitdistr(x, "gamma") ## now do this directly with more control. fitdistr(x, dgamma, list(shape = 1, rate = 0.1), lower = 0.001) set.seed(123) x2 <- rt(250, df = 9) fitdistr(x2, "t", df = 9) ## allow df to vary: not a very good idea! fitdistr(x2, "t") ## now do fixed-df fit directly with more control. mydt <- function(x, m, s, df) dt((x-m)/s, df)/s fitdistr(x2, mydt, list(m = 0, s = 1), df = 9, lower = c(-Inf, 0)) set.seed(123) x3 <- rweibull(100, shape = 4, scale = 100) fitdistr(x3, "weibull") set.seed(123) x4 <- rnegbin(500, mu = 5, theta = 4) fitdistr(x4, "Negative Binomial") options(op) } \keyword{distribution} \keyword{htest} MASS/man/chem.Rd0000644000176000001440000000105711754562034013035 0ustar ripleyusers% file MASS/man/chem.Rd % copyright (C) 1994-9 W. N. Venables and B. D. 
Ripley % \name{chem} \alias{chem} \title{ Copper in Wholemeal Flour } \description{ A numeric vector of 24 determinations of copper in wholemeal flour, in parts per million. } \usage{ chem } \source{ Analytical Methods Committee (1989) Robust statistics -- how not to reject outliers. \emph{The Analyst} \bold{114}, 1693--1702. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/petrol.Rd0000644000176000001440000000344511754562034013431 0ustar ripleyusers% file MASS/man/petrol.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{petrol} \alias{petrol} \title{ N. L. Prater's Petrol Refinery Data } \description{ The yield of a petroleum refining process with four covariates. The crude oil appears to come from only 10 distinct samples. These data were originally used by Prater (1956) to build an estimation equation for the yield of the refining process of crude oil to gasoline. } \usage{ petrol } \format{ The variables are as follows \describe{ \item{\code{No}}{ crude oil sample identification label. (Factor.) } \item{\code{SG}}{ specific gravity, degrees API. (Constant within sample.) } \item{\code{VP}}{ vapour pressure in pounds per square inch. (Constant within sample.) } \item{\code{V10}}{ volatility of crude; ASTM 10\% point. (Constant within sample.) } \item{\code{EP}}{ desired volatility of gasoline. (The end point. Varies within sample.) } \item{\code{Y}}{ yield as a percentage of crude. } } } \source{ N. H. Prater (1956) Estimate gasoline yields from crudes. \emph{Petroleum Refiner} \bold{35}, 236--238. This dataset is also given in D. J. Hand, F. Daly, K. McConway, D. Lunn and E. Ostrowski (eds) (1994) \emph{A Handbook of Small Data Sets.} Chapman & Hall. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. 
} \examples{ library(nlme) Petrol <- petrol Petrol[, 2:5] <- scale(as.matrix(Petrol[, 2:5]), scale = FALSE) pet3.lme <- lme(Y ~ SG + VP + V10 + EP, random = ~ 1 | No, data = Petrol) pet3.lme <- update(pet3.lme, method = "ML") pet4.lme <- update(pet3.lme, fixed = Y ~ V10 + EP) anova(pet4.lme, pet3.lme) } \keyword{datasets} MASS/man/motors.Rd0000644000176000001440000000331111754562034013437 0ustar ripleyusers% file MASS/man/motors.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{motors} \alias{motors} \title{ Accelerated Life Testing of Motorettes } \description{ The \code{motors} data frame has 40 rows and 3 columns. It describes an accelerated life test at each of four temperatures of 10 motorettes, and has rather discrete times. } \usage{ motors } \format{ This data frame contains the following columns: \describe{ \item{\code{temp}}{ the temperature (degrees C) of the test. } \item{\code{time}}{ the time in hours to failure or censoring at 8064 hours (= 336 days). } \item{\code{cens}}{ an indicator variable for death. } } } \source{ Kalbfleisch, J. D. and Prentice, R. L. (1980) \emph{The Statistical Analysis of Failure Time Data.} New York: Wiley. taken from Nelson, W. D. and Hahn, G. J. (1972) Linear regression of a regression relationship from censored data. Part 1 -- simple methods and their application. \emph{Technometrics}, \bold{14}, 247--276. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. 
} \examples{ library(survival) plot(survfit(Surv(time, cens) ~ factor(temp), motors), conf.int = FALSE) # fit Weibull model motor.wei <- survreg(Surv(time, cens) ~ temp, motors) summary(motor.wei) # and predict at 130C unlist(predict(motor.wei, data.frame(temp=130), se.fit = TRUE)) motor.cox <- coxph(Surv(time, cens) ~ temp, motors) summary(motor.cox) # predict at temperature 200 plot(survfit(motor.cox, newdata = data.frame(temp=200), conf.type = "log-log")) summary( survfit(motor.cox, newdata = data.frame(temp=130)) ) } \keyword{datasets} MASS/man/stdres.Rd0000644000176000001440000000124111754562034013420 0ustar ripleyusers% file MASS/man/stdres.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{stdres} \alias{stdres} \alias{lmwork} \title{ Extract Standardized Residuals from a Linear Model } \description{ The standardized residuals. These are normalized to unit variance, fitted including the current data point. } \usage{ stdres(object) } \arguments{ \item{object}{ any object representing a linear model. }} \value{ The vector of appropriately transformed residuals. } \seealso{ \code{\link{residuals}}, \code{\link{studres}} } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{models} MASS/man/stepAIC.Rd0000644000176000001440000001344612171733033013407 0ustar ripleyusers% file MASS/man/stepAIC.Rd % copyright (C) 1994-2002 W. N. Venables and B. D. Ripley % \name{stepAIC} \alias{stepAIC} \alias{extractAIC.gls} \alias{terms.gls} \alias{extractAIC.lme} \alias{terms.lme} \title{ Choose a model by AIC in a Stepwise Algorithm } \description{ Performs stepwise model selection by AIC. } \usage{ stepAIC(object, scope, scale = 0, direction = c("both", "backward", "forward"), trace = 1, keep = NULL, steps = 1000, use.start = FALSE, k = 2, \dots) } \arguments{ \item{object}{ an object representing a model of an appropriate class. 
This is used as the initial model in the stepwise search. } \item{scope}{ defines the range of models examined in the stepwise search. This should be either a single formula, or a list containing components \code{upper} and \code{lower}, both formulae. See the details for how to specify the formulae and how they are used. } \item{scale}{ used in the definition of the AIC statistic for selecting the models, currently only for \code{\link{lm}} and \code{\link{aov}} models (see \code{\link{extractAIC}} for details). } \item{direction}{ the mode of stepwise search, can be one of \code{"both"}, \code{"backward"}, or \code{"forward"}, with a default of \code{"both"}. If the \code{scope} argument is missing the default for \code{direction} is \code{"backward"}. } \item{trace}{ if positive, information is printed during the running of \code{stepAIC}. Larger values may give more information on the fitting process. } \item{keep}{ a filter function whose input is a fitted model object and the associated \code{AIC} statistic, and whose output is arbitrary. Typically \code{keep} will select a subset of the components of the object and return them. The default is not to keep anything. } \item{steps}{ the maximum number of steps to be considered. The default is 1000 (essentially as many as required). It is typically used to stop the process early. } \item{use.start}{ if true the updated fits are done starting at the linear predictor for the currently selected model. This may speed up the iterative calculations for \code{glm} (and other fits), but it can also slow them down. \bold{Not used} in \R. } \item{k}{ the multiple of the number of degrees of freedom used for the penalty. Only \code{k = 2} gives the genuine AIC: \code{k = log(n)} is sometimes referred to as BIC or SBC. } \item{\dots}{ any additional arguments to \code{extractAIC}. (None are currently used.) }} \value{ the stepwise-selected model is returned, with up to two additional components. 
There is an \code{"anova"} component corresponding to the steps taken in the search, as well as a \code{"keep"} component if the \code{keep=} argument was supplied in the call. The \code{"Resid. Dev"} column of the analysis of deviance table refers to a constant minus twice the maximized log likelihood: it will be a deviance only in cases where a saturated model is well-defined (thus excluding \code{lm}, \code{aov} and \code{survreg} fits, for example). } \details{ The set of models searched is determined by the \code{scope} argument. The right-hand-side of its \code{lower} component is always included in the model, and right-hand-side of the model is included in the \code{upper} component. If \code{scope} is a single formula, it specifies the \code{upper} component, and the \code{lower} model is empty. If \code{scope} is missing, the initial model is used as the \code{upper} model. Models specified by \code{scope} can be templates to update \code{object} as used by \code{\link{update.formula}}. There is a potential problem in using \code{\link{glm}} fits with a variable \code{scale}, as in that case the deviance is not simply related to the maximized log-likelihood. The \code{glm} method for \code{\link{extractAIC}} makes the appropriate adjustment for a \code{gaussian} family, but may need to be amended for other cases. (The \code{binomial} and \code{poisson} families have fixed \code{scale} by default and do not correspond to a particular maximum-likelihood problem for variable \code{scale}.) Where a conventional deviance exists (e.g. for \code{lm}, \code{aov} and \code{glm} fits) this is quoted in the analysis of variance table: it is the \emph{unscaled} deviance. } \note{ The model fitting must apply the models to the same dataset. This may be a problem if there are missing values and an \code{na.action} other than \code{na.fail} is used (as is the default in \R). We suggest you remove the missing values first. } \references{ Venables, W. N. and Ripley, B. D. 
(2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{addterm}}, \code{\link{dropterm}}, \code{\link{step}} } \examples{ quine.hi <- aov(log(Days + 2.5) ~ .^4, quine) quine.nxt <- update(quine.hi, . ~ . - Eth:Sex:Age:Lrn) quine.stp <- stepAIC(quine.nxt, scope = list(upper = ~Eth*Sex*Age*Lrn, lower = ~1), trace = FALSE) quine.stp$anova cpus1 <- cpus for(v in names(cpus)[2:7]) cpus1[[v]] <- cut(cpus[[v]], unique(quantile(cpus[[v]])), include.lowest = TRUE) cpus0 <- cpus1[, 2:8] # excludes names, authors' predictions cpus.samp <- sample(1:209, 100) cpus.lm <- lm(log10(perf) ~ ., data = cpus1[cpus.samp,2:8]) cpus.lm2 <- stepAIC(cpus.lm, trace = FALSE) cpus.lm2$anova example(birthwt) birthwt.glm <- glm(low ~ ., family = binomial, data = bwt) birthwt.step <- stepAIC(birthwt.glm, trace = FALSE) birthwt.step$anova birthwt.step2 <- stepAIC(birthwt.glm, ~ .^2 + I(scale(age)^2) + I(scale(lwt)^2), trace = FALSE) birthwt.step2$anova quine.nb <- glm.nb(Days ~ .^4, data = quine) quine.nb2 <- stepAIC(quine.nb) quine.nb2$anova } \keyword{models} MASS/man/write.matrix.Rd0000644000176000001440000000247211754562034014560 0ustar ripleyusers% file MASS/man/write.matrix.Rd % copyright (C) 1994-2002 W. N. Venables and B. D. Ripley % \name{write.matrix} \alias{write.matrix} \title{ Write a Matrix or Data Frame } \description{ Writes a matrix or data frame to a file or the console, using column labels and a layout respecting columns. } \usage{ write.matrix(x, file = "", sep = " ", blocksize) } \arguments{ \item{x}{ matrix or data frame. } \item{file}{ name of output file. The default (\code{""}) is the console. } \item{sep}{ The separator between columns. } \item{blocksize}{ If supplied and positive, the output is written in blocks of \code{blocksize} rows. Choose as large as possible consistent with the amount of memory available. 
}} \details{ If \code{x} is a matrix, supplying \code{blocksize} is more memory-efficient and enables larger matrices to be written, but each block of rows might be formatted slightly differently. If \code{x} is a data frame, the conversion to a matrix may negate the memory saving. } \section{Side Effects}{ A formatted file is produced, with column headings (if \code{x} has them) and columns of data. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{write.table}} } \keyword{file} \keyword{print} MASS/man/predict.lda.Rd0000644000176000001440000000611011754562034014305 0ustar ripleyusers% file MASS/man/predict.lda.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{predict.lda} \alias{predict.lda} \title{ Classify Multivariate Observations by Linear Discrimination } \description{ Classify multivariate observations in conjunction with \code{lda}, and also project data onto the linear discriminants. } \usage{ \method{predict}{lda}(object, newdata, prior = object$prior, dimen, method = c("plug-in", "predictive", "debiased"), \dots) } \arguments{ \item{object}{ object of class \code{"lda"} } \item{newdata}{ data frame of cases to be classified or, if \code{object} has a formula, a data frame with columns of the same names as the variables used. A vector will be interpreted as a row vector. If newdata is missing, an attempt will be made to retrieve the data used to fit the \code{lda} object. } \item{prior}{ The prior probabilities of the classes, by default the proportions in the training set or what was set in the call to \code{lda}. } \item{dimen}{ the dimension of the space to be used. If this is less than \code{min(p, ng-1)}, only the first \code{dimen} discriminant components are used (except for \code{method="predictive"}), and only those dimensions are returned in \code{x}. } \item{method}{ This determines how the parameter estimation is handled. 
With \code{"plug-in"} (the default) the usual unbiased parameter estimates are used and assumed to be correct. With \code{"debiased"} an unbiased estimator of the log posterior probabilities is used, and with \code{"predictive"} the parameter estimates are integrated out using a vague prior. } \item{\dots}{ arguments based from or to other methods }} \value{ a list with components \item{class}{ The MAP classification (a factor) } \item{posterior}{ posterior probabilities for the classes } \item{x}{ the scores of test cases on up to \code{dimen} discriminant variables }} \details{ This function is a method for the generic function \code{predict()} for class \code{"lda"}. It can be invoked by calling \code{predict(x)} for an object \code{x} of the appropriate class, or directly by calling \code{predict.lda(x)} regardless of the class of the object. Missing values in \code{newdata} are handled by returning \code{NA} if the linear discriminants cannot be evaluated. If \code{newdata} is omitted and the \code{na.action} of the fit omitted cases, these will be omitted on the prediction. This version centres the linear discriminants so that the weighted mean (weighted by \code{prior}) of the group centroids is at the origin. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. Ripley, B. D. (1996) \emph{Pattern Recognition and Neural Networks}. Cambridge University Press. } \seealso{ \code{\link{lda}}, \code{\link{qda}}, \code{\link{predict.qda}} } \examples{ tr <- sample(1:50, 25) train <- rbind(iris3[tr,,1], iris3[tr,,2], iris3[tr,,3]) test <- rbind(iris3[-tr,,1], iris3[-tr,,2], iris3[-tr,,3]) cl <- factor(c(rep("s",25), rep("c",25), rep("v",25))) z <- lda(train, cl) predict(z, test)$class } \keyword{multivariate} MASS/man/birthwt.Rd0000644000176000001440000000324012171733033013571 0ustar ripleyusers% file MASS/man/birthwt.Rd % copyright (C) 1994-9 W. N. Venables and B. D. 
Ripley % \name{birthwt} \alias{birthwt} \title{ Risk Factors Associated with Low Infant Birth Weight } \description{ The \code{birthwt} data frame has 189 rows and 10 columns. The data were collected at Baystate Medical Center, Springfield, Mass during 1986. } \usage{ birthwt } \format{ This data frame contains the following columns: \describe{ \item{\code{low}}{indicator of birth weight less than 2.5 kg.} \item{\code{age}}{mother's age in years.} \item{\code{lwt}}{mother's weight in pounds at last menstrual period.} \item{\code{race}}{mother's race (\code{1} = white, \code{2} = black, \code{3} = other).} \item{\code{smoke}}{smoking status during pregnancy.} \item{\code{ptl}}{number of previous premature labours.} \item{\code{ht}}{history of hypertension.} \item{\code{ui}}{presence of uterine irritability.} \item{\code{ftv}}{number of physician visits during the first trimester.} \item{\code{bwt}}{birth weight in grams.} } } \source{ Hosmer, D.W. and Lemeshow, S. (1989) \emph{Applied Logistic Regression.} New York: Wiley } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \examples{ bwt <- with(birthwt, { race <- factor(race, labels = c("white", "black", "other")) ptd <- factor(ptl > 0) ftv <- factor(ftv) levels(ftv)[-(1:2)] <- "2+" data.frame(low = factor(low), age, lwt, race, smoke = (smoke > 0), ptd, ht = (ht > 0), ui = (ui > 0), ftv) }) options(contrasts = c("contr.treatment", "contr.poly")) glm(low ~ ., binomial, bwt) } \keyword{datasets} MASS/man/mvrnorm.Rd0000644000176000001440000000317412463405616013623 0ustar ripleyusers% file MASS/man/mvrnorm.Rd % copyright (C) 1994-2015 W. N. Venables and B. D. Ripley % \name{mvrnorm} \alias{mvrnorm} \title{Simulate from a Multivariate Normal Distribution} \description{ Produces one or more samples from the specified multivariate normal distribution. 
} \usage{ mvrnorm(n = 1, mu, Sigma, tol = 1e-6, empirical = FALSE, EISPACK = FALSE) } \arguments{ \item{n}{the number of samples required.} \item{mu}{a vector giving the means of the variables.} \item{Sigma}{a positive-definite symmetric matrix specifying the covariance matrix of the variables.} \item{tol}{tolerance (relative to largest variance) for numerical lack of positive-definiteness in \code{Sigma}.} \item{empirical}{logical. If true, mu and Sigma specify the empirical not population mean and covariance matrix.} \item{EISPACK}{logical: values other than \code{FALSE} are an error.} } \value{ If \code{n = 1} a vector of the same length as \code{mu}, otherwise an \code{n} by \code{length(mu)} matrix with one sample in each row. } \section{Side Effects}{ Causes creation of the dataset \code{.Random.seed} if it does not already exist, otherwise its value is updated. } \details{ The matrix decomposition is done via \code{eigen}; although a Choleski decomposition might be faster, the eigendecomposition is stabler. } \references{ B. D. Ripley (1987) \emph{Stochastic Simulation.} Wiley. Page 98. } \seealso{ \code{\link{rnorm}} } \examples{ Sigma <- matrix(c(10,3,3,2),2,2) Sigma var(mvrnorm(n = 1000, rep(0, 2), Sigma)) var(mvrnorm(n = 1000, rep(0, 2), Sigma, empirical = TRUE)) } \keyword{distribution} \keyword{multivariate} MASS/man/logtrans.Rd0000644000176000001440000000370011754562034013747 0ustar ripleyusers% file MASS/man/logtrans.Rd % copyright (C) 1994-2004 W. N. Venables and B. D. Ripley % \name{logtrans} \alias{logtrans} \alias{logtrans.formula} \alias{logtrans.lm} \alias{logtrans.default} \title{ Estimate log Transformation Parameter } \description{ Find and optionally plot the marginal (profile) likelihood for alpha for a transformation model of the form \code{log(y + alpha) ~ x1 + x2 + \dots}. } \usage{ logtrans(object, ...) 
\method{logtrans}{default}(object, \dots, alpha = seq(0.5, 6, by = 0.25) - min(y), plotit = TRUE, interp =, xlab = "alpha", ylab = "log Likelihood") \method{logtrans}{formula}(object, data, \dots) \method{logtrans}{lm}(object, \dots) } \arguments{ \item{object}{ Fitted linear model object, or formula defining the untransformed model that is \code{y ~ x1 + x2 + \dots}. The function is generic. } \item{\dots}{ If \code{object} is a formula, this argument may specify a data frame as for \code{lm}. } \item{alpha}{ Set of values for the transformation parameter, alpha. } \item{plotit}{ Should plotting be done? } \item{interp}{ Should the marginal log-likelihood be interpolated with a spline approximation? (Default is \code{TRUE} if plotting is to be done and the number of real points is less than 100.) } \item{xlab}{ as for \code{plot}. } \item{ylab}{ as for \code{plot}. } \item{data}{ optional \code{data} argument for \code{lm} fit. } } \value{ List with components \code{x} (for alpha) and \code{y} (for the marginal log-likelihood values). } \section{Side Effects}{ A plot of the marginal log-likelihood is produced, if requested, together with an approximate mle and 95\% confidence interval. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{boxcox}} } \examples{ logtrans(Days ~ Age*Sex*Eth*Lrn, data = quine, alpha = seq(0.75, 6.5, len=20)) } \keyword{regression} \keyword{models} \keyword{hplot} MASS/man/nlschools.Rd0000644000176000001440000000342113577100651014120 0ustar ripleyusers\name{nlschools} \alias{nlschools} \title{ Eighth-Grade Pupils in the Netherlands } \description{ Snijders and Bosker (1999) use as a running example a study of 2287 eighth-grade pupils (aged about 11) in 132 classes in 131 schools in the Netherlands. Only the variables used in our examples are supplied. 
} \usage{ nlschools } \format{ This data frame contains 2287 rows and the following columns: \describe{ \item{\code{lang}}{ language test score. } \item{\code{IQ}}{ verbal IQ. } \item{\code{class}}{ class ID. } \item{\code{GS}}{ class size: number of eighth-grade pupils recorded in the class (there may be others: see \code{COMB}, and some may have been omitted with missing values). } \item{\code{SES}}{ social-economic status of pupil's family. } \item{\code{COMB}}{ were the pupils taught in a multi-grade class (\code{0/1})? Classes which contained pupils from grades 7 and 8 are coded \code{1}, but only eighth-graders were tested. } } } \source{ Snijders, T. A. B. and Bosker, R. J. (1999) \emph{Multilevel Analysis. An Introduction to Basic and Advanced Multilevel Modelling.} London: Sage. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \examples{ \dontshow{op <- options(digits=5)} nl1 <- within(nlschools, { IQave <- tapply(IQ, class, mean)[as.character(class)] IQ <- IQ - IQave }) cen <- c("IQ", "IQave", "SES") nl1[cen] <- scale(nl1[cen], center = TRUE, scale = FALSE) nl.lme <- nlme::lme(lang ~ IQ*COMB + IQave + SES, random = ~ IQ | class, data = nl1) ## IGNORE_RDIFF_BEGIN summary(nl.lme) ## IGNORE_RDIFF_END \testonly{options(op)} } \keyword{datasets} MASS/man/ships.Rd0000644000176000001440000000164711754562034013254 0ustar ripleyusers% file MASS/man/ships.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{ships} \alias{ships} \title{ Ships Damage Data } \description{ Data frame giving the number of damage incidents and aggregate months of service by ship type, year of construction, and period of operation. } \usage{ ships } \format{ \describe{ \item{\code{type}}{ type: \code{"A"} to \code{"E"}. } \item{\code{year}}{ year of construction: 1960--64, 65--69, 70--74, 75--79 (coded as \code{"60"}, \code{"65"}, \code{"70"}, \code{"75"}). 
} \item{\code{period}}{ period of operation : 1960--74, 75--79. } \item{\code{service}}{ aggregate months of service. } \item{\code{incidents}}{ number of damage incidents. } } } \source{ P. McCullagh and J. A. Nelder, (1983), \emph{Generalized Linear Models.} Chapman & Hall, section 6.3.2, page 137 } \keyword{datasets} MASS/man/caith.Rd0000644000176000001440000000213711754562034013211 0ustar ripleyusers% file MASS/man/caith.Rd % copyright (C) 1999 W. N. Venables and B. D. Ripley % \name{caith} \alias{caith} \title{ Colours of Eyes and Hair of People in Caithness } \description{ Data on the cross-classification of people in Caithness, Scotland, by eye and hair colour. The region of the UK is particularly interesting as there is a mixture of people of Nordic, Celtic and Anglo-Saxon origin. } \usage{ caith } \format{ A 4 by 5 table with rows the eye colours (blue, light, medium, dark) and columns the hair colours (fair, red, medium, dark, black). } \source{ Fisher, R.A. (1940) The precision of discriminant functions. \emph{Annals of Eugenics (London)} \bold{10}, 422--429. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \examples{ corresp(caith) dimnames(caith)[[2]] <- c("F", "R", "M", "D", "B") par(mfcol=c(1,3)) plot(corresp(caith, nf=2)); title("symmetric") plot(corresp(caith, nf=2), type="rows"); title("rows") plot(corresp(caith, nf=2), type="col"); title("columns") par(mfrow=c(1,1)) } \keyword{datasets} MASS/man/menarche.Rd0000644000176000001440000000226711754562034013707 0ustar ripleyusers% file MASS/man/menarche.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{menarche} \alias{menarche} \title{ Age of Menarche in Warsaw } \description{ Proportions of female children at various ages during adolescence who have reached menarche. } \usage{ menarche } \format{ This data frame contains the following columns: \describe{ \item{\code{Age}}{ Average age of the group. 
(The groups are reasonably age homogeneous.) } \item{\code{Total}}{ Total number of children in the group. } \item{\code{Menarche}}{ Number who have reached menarche. } } } \source{ Milicer, H. and Szczotka, F. (1966) Age at Menarche in Warsaw girls in 1965. \emph{Human Biology} \bold{38}, 199--203. The data are also given in\cr Aranda-Ordaz, F.J. (1981) On two families of transformations to additivity for binary response data. \emph{Biometrika} \bold{68}, 357--363. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \examples{ mprob <- glm(cbind(Menarche, Total - Menarche) ~ Age, binomial(link = probit), data = menarche) } \keyword{datasets} MASS/man/shrimp.Rd0000644000176000001440000000120711754562034013420 0ustar ripleyusers% file MASS/man/shrimp.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{shrimp} \alias{shrimp} \title{ Percentage of Shrimp in Shrimp Cocktail } \description{ A numeric vector with 18 determinations by different laboratories of the amount (percentage of the declared total weight) of shrimp in shrimp cocktail. } \usage{ shrimp } \source{ F. J. King and J. J. Ryan (1976) Collaborative study of the determination of the amount of shrimp in shrimp cocktail. \emph{J. Off. Anal. Chem.} \bold{59}, 644--649. R. G. Staudte and S. J. Sheather (1990) \emph{Robust Estimation and Testing.} Wiley. } \keyword{datasets} MASS/man/polr.Rd0000644000176000001440000001553012317504346013074 0ustar ripleyusers% file MASS/man/polr.Rd % copyright (C) 1994-2014 W. N. Venables and B. D. Ripley % \name{polr} \alias{polr} \title{ Ordered Logistic or Probit Regression } \description{ Fits a logistic or probit regression model to an ordered factor response. The default logistic case is \emph{proportional odds logistic regression}, after which the function is named. 
} \usage{ polr(formula, data, weights, start, \dots, subset, na.action, contrasts = NULL, Hess = FALSE, model = TRUE, method = c("logistic", "probit", "loglog", "cloglog", "cauchit")) } \arguments{ \item{formula}{ a formula expression as for regression models, of the form \code{response ~ predictors}. The response should be a factor (preferably an ordered factor), which will be interpreted as an ordinal response, with levels ordered as in the factor. The model must have an intercept: attempts to remove one will lead to a warning and be ignored. An offset may be used. See the documentation of \code{\link{formula}} for other details. } \item{data}{ an optional data frame in which to interpret the variables occurring in \code{formula}. } \item{weights}{ optional case weights in fitting. Default to 1. } \item{start}{ initial values for the parameters. This is in the format \code{c(coefficients, zeta)}: see the Values section. } \item{\dots}{ additional arguments to be passed to \code{\link{optim}}, most often a \code{control} argument. } \item{subset}{ expression saying which subset of the rows of the data should be used in the fit. All observations are included by default. } \item{na.action}{ a function to filter missing data. } \item{contrasts}{ a list of contrasts to be used for some or all of the factors appearing as variables in the model formula. } \item{Hess}{ logical for whether the Hessian (the observed information matrix) should be returned. Use this if you intend to call \code{summary} or \code{vcov} on the fit. } \item{model}{ logical for whether the model matrix should be returned. } \item{method}{ logistic or probit or (complementary) log-log or cauchit (corresponding to a Cauchy latent variable). } } \details{ This model is what Agresti (2002) calls a \emph{cumulative link} model. 
The basic interpretation is as a \emph{coarsened} version of a latent variable \eqn{Y_i} which has a logistic or normal or extreme-value or Cauchy distribution with scale parameter one and a linear model for the mean. The ordered factor which is observed is which bin \eqn{Y_i} falls into with breakpoints \deqn{\zeta_0 = -\infty < \zeta_1 < \cdots < \zeta_K = \infty}{zeta_0 = -Inf < zeta_1 < \dots < zeta_K = Inf} This leads to the model \deqn{\mbox{logit} P(Y \le k | x) = \zeta_k - \eta}{logit P(Y <= k | x) = zeta_k - eta} with \emph{logit} replaced by \emph{probit} for a normal latent variable, and \eqn{\eta}{eta} being the linear predictor, a linear function of the explanatory variables (with no intercept). Note that it is quite common for other software to use the opposite sign for \eqn{\eta}{eta} (and hence the coefficients \code{beta}). In the logistic case, the left-hand side of the last display is the log odds of category \eqn{k} or less, and since these are log odds which differ only by a constant for different \eqn{k}, the odds are proportional. Hence the term \emph{proportional odds logistic regression}. The log-log and complementary log-log links are the increasing functions \eqn{F^{-1}(p) = -log(-log(p))}{F^-1(p) = -log(-log(p))} and \eqn{F^{-1}(p) = log(-log(1-p))}{F^-1(p) = log(-log(1-p))}; some call the first the \sQuote{negative log-log} link. These correspond to a latent variable with the extreme-value distribution for the maximum and minimum respectively. A \emph{proportional hazards} model for grouped survival times can be obtained by using the complementary log-log link with grouping ordered by increasing times. \code{\link{predict}}, \code{\link{summary}}, \code{\link{vcov}}, \code{\link{anova}}, \code{\link{model.frame}} and an \code{extractAIC} method for use with \code{\link{stepAIC}} (and \code{\link{step}}). There are also \code{\link{profile}} and \code{\link{confint}} methods. } \value{ A object of class \code{"polr"}. 
This has components \item{coefficients}{the coefficients of the linear predictor, which has no intercept.} \item{zeta}{the intercepts for the class boundaries.} \item{deviance}{the residual deviance.} \item{fitted.values}{a matrix, with a column for each level of the response.} \item{lev}{the names of the response levels.} \item{terms}{the \code{terms} structure describing the model.} \item{df.residual}{the number of residual degrees of freedoms, calculated using the weights.} \item{edf}{the (effective) number of degrees of freedom used by the model} \item{n, nobs}{the (effective) number of observations, calculated using the weights. (\code{nobs} is for use by \code{\link{stepAIC}}.} \item{call}{the matched call.} \item{method}{the matched method used.} \item{convergence}{the convergence code returned by \code{optim}.} \item{niter}{the number of function and gradient evaluations used by \code{optim}.} \item{lp}{the linear predictor (including any offset).} \item{Hessian}{(if \code{Hess} is true). Note that this is a numerical approximation derived from the optimization proces.} \item{model}{(if \code{model} is true).} } \note{ The \code{\link{vcov}} method uses the approximate Hessian: for reliable results the model matrix should be sensibly scaled with all columns having range the order of one. Prior to version 7.3-32, \code{method = "cloglog"} confusingly gave the log-log link, implicitly assuming the first response level was the \sQuote{best}. } \references{ Agresti, A. (2002) \emph{Categorical Data.} Second edition. Wiley. Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{optim}}, \code{\link{glm}}, \code{\link[nnet]{multinom}}. 
} \examples{ options(contrasts = c("contr.treatment", "contr.poly")) house.plr <- polr(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) house.plr summary(house.plr, digits = 3) ## slightly worse fit from summary(update(house.plr, method = "probit", Hess = TRUE), digits = 3) ## although it is not really appropriate, can fit summary(update(house.plr, method = "loglog", Hess = TRUE), digits = 3) summary(update(house.plr, method = "cloglog", Hess = TRUE), digits = 3) predict(house.plr, housing, type = "p") addterm(house.plr, ~.^2, test = "Chisq") house.plr2 <- stepAIC(house.plr, ~.^2) house.plr2$anova anova(house.plr, house.plr2) house.plr <- update(house.plr, Hess=TRUE) pr <- profile(house.plr) confint(pr) plot(pr) pairs(pr) } \keyword{models} MASS/man/npr1.Rd0000644000176000001440000000177212463405616013005 0ustar ripleyusers% file MASS/man/npr1.Rd % copyright (C) 1994-2014 W. N. Venables and B. D. Ripley % \name{npr1} \alias{npr1} \title{ US Naval Petroleum Reserve No. 1 data } \description{ Data on the locations, porosity and permeability (a measure of oil flow) on 104 oil wells in the US Naval Petroleum Reserve No. 1 in California. } \usage{ npr1 } \format{ This data frame contains the following columns: \describe{ \item{\code{x}}{ x coordinates, in miles (origin unspecified).. } \item{\code{y}}{ y coordinates, in miles. } \item{\code{perm}}{ permeability in milli-Darcies. } \item{\code{por}}{ porosity (\%). } } } \source{ Maher, J.C., Carter, R.D. and Lantz, R.J. (1975) Petroleum geology of Naval Petroleum Reserve No. 1, Elk Hills, Kern County, California. \emph{USGS Professional Paper} \bold{912}. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \keyword{datasets} MASS/man/isoMDS.Rd0000644000176000001440000000556511754562034013267 0ustar ripleyusers% file MASS/man/man/isoMDS.Rd % copyright (C) 1994-2004 W. N. Venables and B. D. 
Ripley % \name{isoMDS} \alias{isoMDS} \alias{Shepard} \title{ Kruskal's Non-metric Multidimensional Scaling } \description{ One form of non-metric multidimensional scaling } \usage{ isoMDS(d, y = cmdscale(d, k), k = 2, maxit = 50, trace = TRUE, tol = 1e-3, p = 2) Shepard(d, x, p = 2) } \arguments{ \item{d}{ distance structure of the form returned by \code{dist}, or a full, symmetric matrix. Data are assumed to be dissimilarities or relative distances, but must be positive except for self-distance. Both missing and infinite values are allowed. } \item{y}{ An initial configuration. If none is supplied, \code{cmdscale} is used to provide the classical solution, unless there are missing or infinite dissimilarities. } \item{k}{ The desired dimension for the solution, passed to \code{cmdscale}. } \item{maxit}{ The maximum number of iterations. } \item{trace}{ Logical for tracing optimization. Default \code{TRUE}. } \item{tol}{ convergence tolerance. } \item{p}{Power for Minkowski distance in the configuration space.} \item{x}{A final configuration.} } \value{ Two components: \item{points}{ A k-column vector of the fitted configuration. } \item{stress}{ The final stress achieved (in percent). }} \section{Side Effects}{ If \code{trace} is true, the initial stress and the current stress are printed out every 5 iterations. } \details{ This chooses a k-dimensional (default k = 2) configuration to minimize the stress, the square root of the ratio of the sum of squared differences between the input distances and those of the configuration to the sum of configuration distances squared. However, the input distances are allowed a monotonic transformation. An iterative algorithm is used, which will usually converge in around 10 iterations. As this is necessarily an \eqn{O(n^2)} calculation, it is slow for large datasets. 
Further, since for the default \eqn{p = 2} the configuration is only determined up to rotations and reflections (by convention the centroid is at the origin), the result can vary considerably from machine to machine. } \references{ T. F. Cox and M. A. A. Cox (1994, 2001) \emph{Multidimensional Scaling}. Chapman & Hall. Ripley, B. D. (1996) \emph{Pattern Recognition and Neural Networks}. Cambridge University Press. Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{cmdscale}}, \code{\link{sammon}} } \examples{ swiss.x <- as.matrix(swiss[, -1]) swiss.dist <- dist(swiss.x) swiss.mds <- isoMDS(swiss.dist) plot(swiss.mds$points, type = "n") text(swiss.mds$points, labels = as.character(1:nrow(swiss.x))) swiss.sh <- Shepard(swiss.dist, swiss.mds$points) plot(swiss.sh, pch = ".") lines(swiss.sh$x, swiss.sh$yf, type = "S") } \keyword{multivariate} MASS/man/denumerate.Rd0000644000176000001440000000346112463405616014253 0ustar ripleyusers% file MASS/man/denumerate.Rd % copyright (C) 2000-2015 W. N. Venables and B. D. Ripley % \name{denumerate} \alias{denumerate} \alias{denumerate.formula} \title{ Transform an Allowable Formula for 'loglm' into one for 'terms' } \description{ \code{\link{loglm}} allows dimension numbers to be used in place of names in the formula. \code{denumerate} modifies such a formula into one that \code{\link{terms}} can process. } \usage{ denumerate(x) } \arguments{ \item{x}{ A formula conforming to the conventions of \code{\link{loglm}}, that is, it may allow dimension numbers to stand in for names when specifying a log-linear model. }} \value{ A linear model formula like that presented, except that where dimension numbers, say \code{n}, have been used to specify fixed margins these are replaced by names of the form \code{.vn} which may be processed by \code{terms}. 
} \details{ The model fitting function \code{\link{loglm}} fits log-linear models to frequency data using iterative proportional scaling. To specify the model the user must nominate the margins in the data that remain fixed under the log-linear model. It is convenient to allow the user to use dimension numbers, 1, 2, 3, \dots for the first, second, third, \dots, margins in a similar way to variable names. As the model formula has to be parsed by \code{\link{terms}}, which treats \code{1} in a special way and requires parseable variable names, these formulae have to be modified by giving genuine names for these margin, or dimension numbers. \code{denumerate} replaces these numbers with names of a special form, namely \code{n} is replaced by \code{.vn}. This allows \code{terms} to parse the formula in the usual way. } \seealso{ \code{\link{renumerate}} } \examples{ denumerate(~(1+2+3)^3 + a/b) ## which gives ~ (.v1 + .v2 + .v3)^3 + a/b } \keyword{models} MASS/man/coop.Rd0000644000176000001440000000214711754562034013062 0ustar ripleyusers% file MASS/man/coop.Rd % copyright (C) 1994-9 W. N. Venables and B. D. Ripley % \name{coop} \alias{coop} \title{ Co-operative Trial in Analytical Chemistry } \description{ Seven specimens were sent to 6 laboratories in 3 separate batches and each analysed for Analyte. Each analysis was duplicated. } \usage{ coop } \format{ This data frame contains the following columns: \describe{ \item{\code{Lab}}{ Laboratory, \code{L1}, \code{L2}, \dots, \code{L6}. } \item{\code{Spc}}{ Specimen, \code{S1}, \code{S2}, \dots, \code{S7}. } \item{\code{Bat}}{ Batch, \code{B1}, \code{B2}, \code{B3} (nested within \code{Spc/Lab}), } \item{\code{Conc}}{ Concentration of Analyte in \eqn{g/kg}. } } } \source{ Analytical Methods Committee (1987) Recommendations for the conduct and interpretation of co-operative trials, \emph{The Analyst} \bold{112}, 679--686. } \references{ Venables, W. N. and Ripley, B. D. 
(2002) \emph{Modern Applied Statistics with S.} Fourth edition. Springer. } \seealso{ \code{\link{chem}}, \code{\link{abbey}}. } \keyword{datasets} MASS/DESCRIPTION0000644000176000001440000000255213577111143012562 0ustar ripleyusersPackage: MASS Priority: recommended Version: 7.3-51.5 Date: 2019-12-20 Revision: $Rev: 3506 $ Depends: R (>= 3.1.0), grDevices, graphics, stats, utils Imports: methods Suggests: lattice, nlme, nnet, survival Authors@R: c(person("Brian", "Ripley", role = c("aut", "cre", "cph"), email = "ripley@stats.ox.ac.uk"), person("Bill", "Venables", role = "ctb"), person(c("Douglas", "M."), "Bates", role = "ctb"), person("Kurt", "Hornik", role = "trl", comment = "partial port ca 1998"), person("Albrecht", "Gebhardt", role = "trl", comment = "partial port ca 1998"), person("David", "Firth", role = "ctb")) Description: Functions and datasets to support Venables and Ripley, "Modern Applied Statistics with S" (4th edition, 2002). Title: Support Functions and Datasets for Venables and Ripley's MASS LazyData: yes ByteCompile: yes License: GPL-2 | GPL-3 URL: http://www.stats.ox.ac.uk/pub/MASS4/ Contact: NeedsCompilation: yes Packaged: 2019-12-20 08:20:12 UTC; ripley Author: Brian Ripley [aut, cre, cph], Bill Venables [ctb], Douglas M. Bates [ctb], Kurt Hornik [trl] (partial port ca 1998), Albrecht Gebhardt [trl] (partial port ca 1998), David Firth [ctb] Maintainer: Brian Ripley Repository: CRAN Date/Publication: 2019-12-20 09:20:35 UTC MASS/tests/0000755000176000001440000000000013577102074012215 5ustar ripleyusersMASS/tests/regression.Rout.save0000644000176000001440000000312712315120517016177 0ustar ripleyusers R version 2.10.0 Under development (unstable) (2009-07-31 r49037) Copyright (C) 2009 The R Foundation for Statistical Computing ISBN 3-900051-07-0 R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. 
Natural language support but running in an English locale R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > ### regression tests > > library(MASS) > > contr.sdif(6) 2-1 3-2 4-3 5-4 6-5 1 -0.8333333 -0.6666667 -0.5 -0.3333333 -0.1666667 2 0.1666667 -0.6666667 -0.5 -0.3333333 -0.1666667 3 0.1666667 0.3333333 -0.5 -0.3333333 -0.1666667 4 0.1666667 0.3333333 0.5 -0.3333333 -0.1666667 5 0.1666667 0.3333333 0.5 0.6666667 -0.1666667 6 0.1666667 0.3333333 0.5 0.6666667 0.8333333 > contr.sdif(6, sparse=TRUE) 2-1 3-2 4-3 5-4 6-5 1 -0.8333333 -0.6666667 -0.5 -0.3333333 -0.1666667 2 0.1666667 -0.6666667 -0.5 -0.3333333 -0.1666667 3 0.1666667 0.3333333 -0.5 -0.3333333 -0.1666667 4 0.1666667 0.3333333 0.5 -0.3333333 -0.1666667 5 0.1666667 0.3333333 0.5 0.6666667 -0.1666667 6 0.1666667 0.3333333 0.5 0.6666667 0.8333333 > stopifnot(all(contr.sdif(6) == contr.sdif(6, sparse=TRUE))) > > > proc.time() user system elapsed 2.298 0.078 2.377 MASS/tests/regression.R0000644000176000001440000000021312315120517014503 0ustar ripleyusers### regression tests library(MASS) contr.sdif(6) contr.sdif(6, sparse=TRUE) stopifnot(all(contr.sdif(6) == contr.sdif(6, sparse=TRUE))) MASS/tests/polr.R0000644000176000001440000000265412032540723013313 0ustar ripleyusers## tests from David Firth 2004-Oct-13 library(MASS) y <- structure(as.integer(c(1, 2, 3, 1, 2, 3)), .Label = c("1", "2", "3"), class = c("ordered", "factor")) Freq <- c(10, 0, 10, 10, 0, 10) group <- structure(as.integer(c(1, 1, 1, 2, 2, 2)), .Label = c("1", "2"), class = "factor") temp <- polr(y ~ group, weights = Freq) temp$convergence temp stopifnot(all(abs(coef(temp)) < 1e-4)) Freq <- c(1000000, 1, 1000000, 1000000, 1, 1000000) temp2 <- polr(y ~ group, weights = Freq) temp2 
stopifnot(all(abs(coef(temp2)) < 1e-4)) ## tests of rank-deficient model matrix group <- factor(c(1, 1, 1, 2, 2, 2), levels=1:3) polr(y ~ group, weights = Freq) group <- factor(c(1, 1, 1, 3, 3, 3), levels=1:3) polr(y ~ group, weights = Freq) ## profile on a single-coef model ## data from McCullagh JRSSB 1980 tonsils <- data.frame(carrier = factor(rep(c('yes', 'no'), each=3)), size = ordered(rep(c(1,2,3),2)), count = c(19,29,24,497,560,269)) m <- polr(size ~ carrier, data = tonsils, weights = count) confint(m) ## refitting needs transformed starting values (Achim Zeileis Mar 2010) load("BankWages.rda") # from AER bw <- polr(job ~ education, data = BankWages) summary(bw) ## failed due to incorrect restarting values ## missing drop = FALSE in profiling (Joris Meys, Sep 2012) house.plr <- polr(Sat ~ Cont, weights = Freq, data = housing) pr <- profile(house.plr) plot(pr) MASS/tests/fitdistr.R0000644000176000001440000000065111754562033014172 0ustar ripleyusers## a quick check on vcov for fitdistr (from 7.3-6/7) library(MASS) options(digits=4) set.seed(1) x <- rnorm(100) fit <- fitdistr(x, "normal") fit vcov(fit) x <- rlnorm(100) fit <- fitdistr(x, "lognormal") fit vcov(fit) x <- rpois(100, 4.5) fit <- fitdistr(x, "poisson") fit vcov(fit) x <- rexp(100, 13) fit <- fitdistr(x, "exponential") fit vcov(fit) x <- rgeom(100, 0.25) fit <- fitdistr(x, "geometric") fit vcov(fit) MASS/tests/Examples/0000755000176000001440000000000013577102074013773 5ustar ripleyusersMASS/tests/Examples/MASS-Ex.Rout.save0000644000176000001440000047571413577100750016741 0ustar ripleyusers R Under development (unstable) (2019-12-19 r77606) -- "Unsuffered Consequences" Copyright (C) 2019 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. 
Natural language support but running in an English locale R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > pkgname <- "MASS" > source(file.path(R.home("share"), "R", "examples-header.R")) > options(warn = 1) > library('MASS') > > base::assign(".oldSearch", base::search(), pos = 'CheckExEnv') > base::assign(".old_wd", base::getwd(), pos = 'CheckExEnv') > cleanEx() > nameEx("Insurance") > ### * Insurance > > flush(stderr()); flush(stdout()) > > ### Name: Insurance > ### Title: Numbers of Car Insurance claims > ### Aliases: Insurance > ### Keywords: datasets > > ### ** Examples > > ## main-effects fit as Poisson GLM with offset > glm(Claims ~ District + Group + Age + offset(log(Holders)), + data = Insurance, family = poisson) Call: glm(formula = Claims ~ District + Group + Age + offset(log(Holders)), family = poisson, data = Insurance) Coefficients: (Intercept) District2 District3 District4 Group.L Group.Q -1.810508 0.025868 0.038524 0.234205 0.429708 0.004632 Group.C Age.L Age.Q Age.C -0.029294 -0.394432 -0.000355 -0.016737 Degrees of Freedom: 63 Total (i.e. 
Null); 54 Residual Null Deviance: 236.3 Residual Deviance: 51.42 AIC: 388.7 > > # same via loglm > loglm(Claims ~ District + Group + Age + offset(log(Holders)), + data = Insurance) Call: loglm(formula = Claims ~ District + Group + Age + offset(log(Holders)), data = Insurance) Statistics: X^2 df P(> X^2) Likelihood Ratio 51.42003 54 0.5745071 Pearson 48.62933 54 0.6809086 > > > > cleanEx() > nameEx("Null") > ### * Null > > flush(stderr()); flush(stdout()) > > ### Name: Null > ### Title: Null Spaces of Matrices > ### Aliases: Null > ### Keywords: algebra > > ### ** Examples > > # The function is currently defined as > function(M) + { + tmp <- qr(M) + set <- if(tmp$rank == 0L) seq_len(ncol(M)) else -seq_len(tmp$rank) + qr.Q(tmp, complete = TRUE)[, set, drop = FALSE] + } function (M) { tmp <- qr(M) set <- if (tmp$rank == 0L) seq_len(ncol(M)) else -seq_len(tmp$rank) qr.Q(tmp, complete = TRUE)[, set, drop = FALSE] } > > > > cleanEx() > nameEx("OME") > ### * OME > > flush(stderr()); flush(stdout()) > > ### Name: OME > ### Title: Tests of Auditory Perception in Children with OME > ### Aliases: OME > ### Keywords: datasets > > ### ** Examples > > # Fit logistic curve from p = 0.5 to p = 1.0 > fp1 <- deriv(~ 0.5 + 0.5/(1 + exp(-(x-L75)/scal)), + c("L75", "scal"), + function(x,L75,scal)NULL) > nls(Correct/Trials ~ fp1(Loud, L75, scal), data = OME, + start = c(L75=45, scal=3)) Nonlinear regression model model: Correct/Trials ~ fp1(Loud, L75, scal) data: OME L75 scal 44.149 3.775 residual sum-of-squares: 69.88 Number of iterations to convergence: 4 Achieved convergence tolerance: 7.016e-06 > nls(Correct/Trials ~ fp1(Loud, L75, scal), + data = OME[OME$Noise == "coherent",], + start=c(L75=45, scal=3)) Nonlinear regression model model: Correct/Trials ~ fp1(Loud, L75, scal) data: OME[OME$Noise == "coherent", ] L75 scal 47.993 1.259 residual sum-of-squares: 30.35 Number of iterations to convergence: 5 Achieved convergence tolerance: 4.895e-06 > nls(Correct/Trials ~ fp1(Loud, L75, 
scal), + data = OME[OME$Noise == "incoherent",], + start = c(L75=45, scal=3)) Nonlinear regression model model: Correct/Trials ~ fp1(Loud, L75, scal) data: OME[OME$Noise == "incoherent", ] L75 scal 38.87 2.17 residual sum-of-squares: 23.73 Number of iterations to convergence: 11 Achieved convergence tolerance: 3.846e-06 > > # individual fits for each experiment > > aa <- factor(OME$Age) > ab <- 10*OME$ID + unclass(aa) > ac <- unclass(factor(ab)) > OME$UID <- as.vector(ac) > OME$UIDn <- OME$UID + 0.1*(OME$Noise == "incoherent") > rm(aa, ab, ac) > OMEi <- OME > > library(nlme) > fp2 <- deriv(~ 0.5 + 0.5/(1 + exp(-(x-L75)/2)), + "L75", function(x,L75) NULL) > dec <- getOption("OutDec") > options(show.error.messages = FALSE, OutDec=".") > OMEi.nls <- nlsList(Correct/Trials ~ fp2(Loud, L75) | UIDn, + data = OMEi, start = list(L75=45), control = list(maxiter=100)) > options(show.error.messages = TRUE, OutDec=dec) > tmp <- sapply(OMEi.nls, function(X) + {if(is.null(X)) NA else as.vector(coef(X))}) > OMEif <- data.frame(UID = round(as.numeric((names(tmp)))), + Noise = rep(c("coherent", "incoherent"), 110), + L75 = as.vector(tmp), stringsAsFactors = TRUE) > OMEif$Age <- OME$Age[match(OMEif$UID, OME$UID)] > OMEif$OME <- OME$OME[match(OMEif$UID, OME$UID)] > OMEif <- OMEif[OMEif$L75 > 30,] > summary(lm(L75 ~ Noise/Age, data = OMEif, na.action = na.omit)) Call: lm(formula = L75 ~ Noise/Age, data = OMEif, na.action = na.omit) Residuals: Min 1Q Median 3Q Max -13.0022 -1.9878 0.3346 2.0229 16.3260 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 47.73580 0.76456 62.435 < 2e-16 *** Noiseincoherent -4.87352 1.11247 -4.381 1.92e-05 *** Noisecoherent:Age -0.02785 0.02349 -1.186 0.237 Noiseincoherent:Age -0.12219 0.02589 -4.719 4.50e-06 *** --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Residual standard error: 3.774 on 196 degrees of freedom (17 observations deleted due to missingness) Multiple R-squared: 0.5246, Adjusted R-squared: 0.5173 F-statistic: 72.09 on 3 and 196 DF, p-value: < 2.2e-16 > summary(lm(L75 ~ Noise/(Age + OME), data = OMEif, + subset = (Age >= 30 & Age <= 60), + na.action = na.omit), cor = FALSE) Call: lm(formula = L75 ~ Noise/(Age + OME), data = OMEif, subset = (Age >= 30 & Age <= 60), na.action = na.omit) Residuals: Min 1Q Median 3Q Max -10.4514 -2.0588 0.0194 1.6827 15.9738 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 50.21090 1.74482 28.777 < 2e-16 *** Noiseincoherent -5.97491 2.70148 -2.212 0.02890 * Noisecoherent:Age -0.09358 0.03586 -2.609 0.01023 * Noiseincoherent:Age -0.15155 0.04151 -3.651 0.00039 *** Noisecoherent:OMElow 0.45103 1.07594 0.419 0.67583 Noiseincoherent:OMElow -0.14075 1.24537 -0.113 0.91021 --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Residual standard error: 3.7 on 119 degrees of freedom (17 observations deleted due to missingness) Multiple R-squared: 0.6073, Adjusted R-squared: 0.5908 F-statistic: 36.81 on 5 and 119 DF, p-value: < 2.2e-16 > > # Or fit by weighted least squares > fpl75 <- deriv(~ sqrt(n)*(r/n - 0.5 - 0.5/(1 + exp(-(x-L75)/scal))), + c("L75", "scal"), + function(r,n,x,L75,scal) NULL) > nls(0 ~ fpl75(Correct, Trials, Loud, L75, scal), + data = OME[OME$Noise == "coherent",], + start = c(L75=45, scal=3)) Nonlinear regression model model: 0 ~ fpl75(Correct, Trials, Loud, L75, scal) data: OME[OME$Noise == "coherent", ] L75 scal 47.798 1.296 residual sum-of-squares: 91.72 Number of iterations to convergence: 5 Achieved convergence tolerance: 9.302e-06 > nls(0 ~ fpl75(Correct, Trials, Loud, L75, scal), + data = OME[OME$Noise == "incoherent",], + start = c(L75=45, scal=3)) Nonlinear regression model model: 0 ~ fpl75(Correct, Trials, Loud, L75, scal) data: OME[OME$Noise == "incoherent", ] L75 scal 
38.553 2.078 residual sum-of-squares: 60.19 Number of iterations to convergence: 8 Achieved convergence tolerance: 4.55e-06 > > # Test to see if the curves shift with age > fpl75age <- deriv(~sqrt(n)*(r/n - 0.5 - 0.5/(1 + + exp(-(x-L75-slope*age)/scal))), + c("L75", "slope", "scal"), + function(r,n,x,age,L75,slope,scal) NULL) > OME.nls1 <- + nls(0 ~ fpl75age(Correct, Trials, Loud, Age, L75, slope, scal), + data = OME[OME$Noise == "coherent",], + start = c(L75=45, slope=0, scal=2)) > sqrt(diag(vcov(OME.nls1))) L75 slope scal 0.61091761 0.01665916 0.17566450 > > OME.nls2 <- + nls(0 ~ fpl75age(Correct, Trials, Loud, Age, L75, slope, scal), + data = OME[OME$Noise == "incoherent",], + start = c(L75=45, slope=0, scal=2)) > sqrt(diag(vcov(OME.nls2))) L75 slope scal 0.49553854 0.01348281 0.24453836 > > # Now allow random effects by using NLME > OMEf <- OME[rep(1:nrow(OME), OME$Trials),] > OMEf$Resp <- with(OME, rep(rep(c(1,0), length(Trials)), + t(cbind(Correct, Trials-Correct)))) > OMEf <- OMEf[, -match(c("Correct", "Trials"), names(OMEf))] > > ## Not run: > ##D ## these fail in R on most platforms > ##D fp2 <- deriv(~ 0.5 + 0.5/(1 + exp(-(x-L75)/exp(lsc))), > ##D c("L75", "lsc"), > ##D function(x, L75, lsc) NULL) > ##D try(summary(nlme(Resp ~ fp2(Loud, L75, lsc), > ##D fixed = list(L75 ~ Age, lsc ~ 1), > ##D random = L75 + lsc ~ 1 | UID, > ##D data = OMEf[OMEf$Noise == "coherent",], method = "ML", > ##D start = list(fixed=c(L75=c(48.7, -0.03), lsc=0.24)), verbose = TRUE))) > ##D > ##D try(summary(nlme(Resp ~ fp2(Loud, L75, lsc), > ##D fixed = list(L75 ~ Age, lsc ~ 1), > ##D random = L75 + lsc ~ 1 | UID, > ##D data = OMEf[OMEf$Noise == "incoherent",], method = "ML", > ##D start = list(fixed=c(L75=c(41.5, -0.1), lsc=0)), verbose = TRUE))) > ## End(Not run) > > > cleanEx() detaching ‘package:nlme’ > nameEx("Skye") > ### * Skye > > flush(stderr()); flush(stdout()) > > ### Name: Skye > ### Title: AFM Compositions of Aphyric Skye Lavas > ### Aliases: Skye > ### Keywords: 
datasets > > ### ** Examples > > # ternary() is from the on-line answers. > ternary <- function(X, pch = par("pch"), lcex = 1, + add = FALSE, ord = 1:3, ...) + { + X <- as.matrix(X) + if(any(X < 0)) stop("X must be non-negative") + s <- drop(X %*% rep(1, ncol(X))) + if(any(s<=0)) stop("each row of X must have a positive sum") + if(max(abs(s-1)) > 1e-6) { + warning("row(s) of X will be rescaled") + X <- X / s + } + X <- X[, ord] + s3 <- sqrt(1/3) + if(!add) + { + oldpty <- par("pty") + on.exit(par(pty=oldpty)) + par(pty="s") + plot(c(-s3, s3), c(0.5-s3, 0.5+s3), type="n", axes=FALSE, + xlab="", ylab="") + polygon(c(0, -s3, s3), c(1, 0, 0), density=0) + lab <- NULL + if(!is.null(dn <- dimnames(X))) lab <- dn[[2]] + if(length(lab) < 3) lab <- as.character(1:3) + eps <- 0.05 * lcex + text(c(0, s3+eps*0.7, -s3-eps*0.7), + c(1+eps, -0.1*eps, -0.1*eps), lab, cex=lcex) + } + points((X[,2] - X[,3])*s3, X[,1], ...) + } > > ternary(Skye/100, ord=c(1,3,2)) > > > > graphics::par(get("par.postscript", pos = 'CheckExEnv')) > cleanEx() > nameEx("addterm") > ### * addterm > > flush(stderr()); flush(stdout()) > > ### Name: addterm > ### Title: Try All One-Term Additions to a Model > ### Aliases: addterm addterm.default addterm.glm addterm.lm > ### Keywords: models > > ### ** Examples > > quine.hi <- aov(log(Days + 2.5) ~ .^4, quine) > quine.lo <- aov(log(Days+2.5) ~ 1, quine) > addterm(quine.lo, quine.hi, test="F") Single term additions Model: log(Days + 2.5) ~ 1 Df Sum of Sq RSS AIC F Value Pr(F) 106.787 -43.664 Eth 1 10.6820 96.105 -57.052 16.0055 0.0001006 *** Sex 1 0.5969 106.190 -42.483 0.8094 0.3698057 Age 3 4.7469 102.040 -44.303 2.2019 0.0904804 . Lrn 1 0.0043 106.783 -41.670 0.0058 0.9392083 --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > house.glm0 <- glm(Freq ~ Infl*Type*Cont + Sat, family=poisson, + data=housing) > addterm(house.glm0, ~. 
+ Sat:(Infl+Type+Cont), test="Chisq") Single term additions Model: Freq ~ Infl * Type * Cont + Sat Df Deviance AIC LRT Pr(Chi) 217.46 610.43 Infl:Sat 4 111.08 512.05 106.371 < 2.2e-16 *** Type:Sat 6 156.79 561.76 60.669 3.292e-11 *** Cont:Sat 2 212.33 609.30 5.126 0.07708 . --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > house.glm1 <- update(house.glm0, . ~ . + Sat*(Infl+Type+Cont)) > addterm(house.glm1, ~. + Sat:(Infl+Type+Cont)^2, test = "Chisq") Single term additions Model: Freq ~ Infl + Type + Cont + Sat + Infl:Type + Infl:Cont + Type:Cont + Infl:Sat + Type:Sat + Cont:Sat + Infl:Type:Cont Df Deviance AIC LRT Pr(Chi) 38.662 455.63 Infl:Type:Sat 12 16.107 457.08 22.5550 0.03175 * Infl:Cont:Sat 4 37.472 462.44 1.1901 0.87973 Type:Cont:Sat 6 28.256 457.23 10.4064 0.10855 --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > > > cleanEx() > nameEx("anova.negbin") > ### * anova.negbin > > flush(stderr()); flush(stdout()) > > ### Name: anova.negbin > ### Title: Likelihood Ratio Tests for Negative Binomial GLMs > ### Aliases: anova.negbin > ### Keywords: regression > > ### ** Examples > > m1 <- glm.nb(Days ~ Eth*Age*Lrn*Sex, quine, link = log) > m2 <- update(m1, . ~ . - Eth:Age:Lrn:Sex) > anova(m2, m1) Likelihood ratio tests of Negative Binomial Models Response: Days Model 1 Eth + Age + Lrn + Sex + Eth:Age + Eth:Lrn + Age:Lrn + Eth:Sex + Age:Sex + Lrn:Sex + Eth:Age:Lrn + Eth:Age:Sex + Eth:Lrn:Sex + Age:Lrn:Sex 2 Eth * Age * Lrn * Sex theta Resid. df 2 x log-lik. Test df LR stat. Pr(Chi) 1 1.90799 120 -1040.728 2 1.92836 118 -1039.324 1 vs 2 2 1.403843 0.4956319 > anova(m2) Warning in anova.negbin(m2) : tests made without re-estimating 'theta' Analysis of Deviance Table Model: Negative Binomial(1.908), link: log Response: Days Terms added sequentially (first to last) Df Deviance Resid. Df Resid. Dev Pr(>Chi) NULL 145 270.03 Eth 1 19.0989 144 250.93 1.241e-05 *** Age 3 16.3483 141 234.58 0.000962 *** Lrn 1 3.5449 140 231.04 0.059730 . 
Sex 1 0.3989 139 230.64 0.527666 Eth:Age 3 14.6030 136 216.03 0.002189 ** Eth:Lrn 1 0.0447 135 215.99 0.832601 Age:Lrn 2 1.7482 133 214.24 0.417240 Eth:Sex 1 1.1470 132 213.09 0.284183 Age:Sex 3 21.9746 129 191.12 6.603e-05 *** Lrn:Sex 1 0.0277 128 191.09 0.867712 Eth:Age:Lrn 2 9.0099 126 182.08 0.011054 * Eth:Age:Sex 3 4.8218 123 177.26 0.185319 Eth:Lrn:Sex 1 3.3160 122 173.94 0.068608 . Age:Lrn:Sex 2 6.3941 120 167.55 0.040882 * --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > > > cleanEx() > nameEx("area") > ### * area > > flush(stderr()); flush(stdout()) > > ### Name: area > ### Title: Adaptive Numerical Integration > ### Aliases: area > ### Keywords: nonlinear > > ### ** Examples > > area(sin, 0, pi) # integrate the sin function from 0 to pi. [1] 2 > > > > cleanEx() > nameEx("bacteria") > ### * bacteria > > flush(stderr()); flush(stdout()) > > ### Name: bacteria > ### Title: Presence of Bacteria after Drug Treatments > ### Aliases: bacteria > ### Keywords: datasets > > ### ** Examples > > contrasts(bacteria$trt) <- structure(contr.sdif(3), + dimnames = list(NULL, c("drug", "encourage"))) > ## fixed effects analyses > summary(glm(y ~ trt * week, binomial, data = bacteria)) Call: glm(formula = y ~ trt * week, family = binomial, data = bacteria) Deviance Residuals: Min 1Q Median 3Q Max -2.2144 0.4245 0.5373 0.6750 1.0697 Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) 1.97548 0.30053 6.573 4.92e-11 *** trtdrug -0.99848 0.69490 -1.437 0.15075 trtencourage 0.83865 0.73482 1.141 0.25374 week -0.11814 0.04460 -2.649 0.00807 ** trtdrug:week -0.01722 0.10570 -0.163 0.87061 trtencourage:week -0.07043 0.10964 -0.642 0.52060 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for binomial family taken to be 1) Null deviance: 217.38 on 219 degrees of freedom Residual deviance: 203.12 on 214 degrees of freedom AIC: 215.12 Number of Fisher Scoring iterations: 4 > summary(glm(y ~ trt + week, binomial, data = bacteria)) Call: glm(formula = y ~ trt + week, family = binomial, data = bacteria) Deviance Residuals: Min 1Q Median 3Q Max -2.2899 0.3885 0.5400 0.7027 1.1077 Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) 1.96018 0.29705 6.599 4.15e-11 *** trtdrug -1.10667 0.42519 -2.603 0.00925 ** trtencourage 0.45502 0.42766 1.064 0.28735 week -0.11577 0.04414 -2.623 0.00872 ** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for binomial family taken to be 1) Null deviance: 217.38 on 219 degrees of freedom Residual deviance: 203.81 on 216 degrees of freedom AIC: 211.81 Number of Fisher Scoring iterations: 4 > summary(glm(y ~ trt + I(week > 2), binomial, data = bacteria)) Call: glm(formula = y ~ trt + I(week > 2), family = binomial, data = bacteria) Deviance Residuals: Min 1Q Median 3Q Max -2.4043 0.3381 0.5754 0.6237 1.0051 Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) 2.2479 0.3560 6.315 2.71e-10 *** trtdrug -1.1187 0.4288 -2.609 0.00909 ** trtencourage 0.4815 0.4330 1.112 0.26614 I(week > 2)TRUE -1.2949 0.4104 -3.155 0.00160 ** --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for binomial family taken to be 1) Null deviance: 217.38 on 219 degrees of freedom Residual deviance: 199.18 on 216 degrees of freedom AIC: 207.18 Number of Fisher Scoring iterations: 5 > > # conditional random-effects analysis > library(survival) > bacteria$Time <- rep(1, nrow(bacteria)) > coxph(Surv(Time, unclass(y)) ~ week + strata(ID), + data = bacteria, method = "exact") Call: coxph(formula = Surv(Time, unclass(y)) ~ week + strata(ID), data = bacteria, method = "exact") coef exp(coef) se(coef) z p week -0.16256 0.84996 0.05472 -2.971 0.00297 Likelihood ratio test=9.85 on 1 df, p=0.001696 n= 220, number of events= 177 > coxph(Surv(Time, unclass(y)) ~ factor(week) + strata(ID), + data = bacteria, method = "exact") Call: coxph(formula = Surv(Time, unclass(y)) ~ factor(week) + strata(ID), data = bacteria, method = "exact") coef exp(coef) se(coef) z p factor(week)2 0.1983 1.2193 0.7241 0.274 0.7842 factor(week)4 -1.4206 0.2416 0.6665 -2.131 0.0331 factor(week)6 -1.6615 0.1899 0.6825 -2.434 0.0149 factor(week)11 -1.6752 0.1873 0.6780 -2.471 0.0135 Likelihood ratio test=15.45 on 4 df, p=0.003854 n= 220, number of events= 177 > coxph(Surv(Time, unclass(y)) ~ I(week > 2) + strata(ID), + data = bacteria, method = "exact") Call: coxph(formula = Surv(Time, unclass(y)) ~ I(week > 2) + strata(ID), data = bacteria, method = "exact") coef exp(coef) se(coef) z p I(week > 2)TRUE -1.6701 0.1882 0.4817 -3.467 0.000527 Likelihood ratio test=15.15 on 1 df, p=9.927e-05 n= 220, number of events= 177 > > # PQL glmm analysis > library(nlme) > summary(glmmPQL(y ~ trt + I(week > 2), random = ~ 1 | ID, + family = binomial, data = bacteria)) iteration 1 iteration 2 iteration 3 iteration 4 iteration 5 iteration 6 Linear mixed-effects model fit by maximum likelihood Data: bacteria AIC BIC logLik NA NA NA Random effects: Formula: ~1 | ID (Intercept) Residual StdDev: 1.410637 0.7800511 Variance function: Structure: 
fixed weights Formula: ~invwt Fixed effects: y ~ trt + I(week > 2) Value Std.Error DF t-value p-value (Intercept) 2.7447864 0.3784193 169 7.253294 0.0000 trtdrug -1.2473553 0.6440635 47 -1.936696 0.0588 trtencourage 0.4930279 0.6699339 47 0.735935 0.4654 I(week > 2)TRUE -1.6072570 0.3583379 169 -4.485311 0.0000 Correlation: (Intr) trtdrg trtncr trtdrug 0.009 trtencourage 0.036 -0.518 I(week > 2)TRUE -0.710 0.047 -0.046 Standardized Within-Group Residuals: Min Q1 Med Q3 Max -5.1985361 0.1572336 0.3513075 0.4949482 1.7448845 Number of Observations: 220 Number of Groups: 50 > > > > cleanEx() detaching ‘package:nlme’, ‘package:survival’ > nameEx("bandwidth.nrd") > ### * bandwidth.nrd > > flush(stderr()); flush(stdout()) > > ### Name: bandwidth.nrd > ### Title: Bandwidth for density() via Normal Reference Distribution > ### Aliases: bandwidth.nrd > ### Keywords: dplot > > ### ** Examples > > # The function is currently defined as > function(x) + { + r <- quantile(x, c(0.25, 0.75)) + h <- (r[2] - r[1])/1.34 + 4 * 1.06 * min(sqrt(var(x)), h) * length(x)^(-1/5) + } function (x) { r <- quantile(x, c(0.25, 0.75)) h <- (r[2] - r[1])/1.34 4 * 1.06 * min(sqrt(var(x)), h) * length(x)^(-1/5) } > > > > cleanEx() > nameEx("bcv") > ### * bcv > > flush(stderr()); flush(stdout()) > > ### Name: bcv > ### Title: Biased Cross-Validation for Bandwidth Selection > ### Aliases: bcv > ### Keywords: dplot > > ### ** Examples > > bcv(geyser$duration) [1] 0.8940809 > > > > cleanEx() > nameEx("beav1") > ### * beav1 > > flush(stderr()); flush(stdout()) > > ### Name: beav1 > ### Title: Body Temperature Series of Beaver 1 > ### Aliases: beav1 > ### Keywords: datasets > > ### ** Examples > > beav1 <- within(beav1, + hours <- 24*(day-346) + trunc(time/100) + (time%%100)/60) > plot(beav1$hours, beav1$temp, type="l", xlab="time", + ylab="temperature", main="Beaver 1") > usr <- par("usr"); usr[3:4] <- c(-0.2, 8); par(usr=usr) > lines(beav1$hours, beav1$activ, type="s", lty=2) > temp <- 
ts(c(beav1$temp[1:82], NA, beav1$temp[83:114]), + start = 9.5, frequency = 6) > activ <- ts(c(beav1$activ[1:82], NA, beav1$activ[83:114]), + start = 9.5, frequency = 6) > > acf(temp[1:53]) > acf(temp[1:53], type = "partial") > ar(temp[1:53]) Call: ar(x = temp[1:53]) Coefficients: 1 0.8222 Order selected 1 sigma^2 estimated as 0.01011 > act <- c(rep(0, 10), activ) > X <- cbind(1, act = act[11:125], act1 = act[10:124], + act2 = act[9:123], act3 = act[8:122]) > alpha <- 0.80 > stemp <- as.vector(temp - alpha*lag(temp, -1)) > sX <- X[-1, ] - alpha * X[-115,] > beav1.ls <- lm(stemp ~ -1 + sX, na.action = na.omit) > summary(beav1.ls, cor = FALSE) Call: lm(formula = stemp ~ -1 + sX, na.action = na.omit) Residuals: Min 1Q Median 3Q Max -0.21317 -0.04317 0.00683 0.05483 0.37683 Coefficients: Estimate Std. Error t value Pr(>|t|) sX 36.85587 0.03922 939.833 < 2e-16 *** sXact 0.25400 0.03930 6.464 3.37e-09 *** sXact1 0.17096 0.05100 3.352 0.00112 ** sXact2 0.16202 0.05147 3.148 0.00215 ** sXact3 0.10548 0.04310 2.448 0.01605 * --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Residual standard error: 0.08096 on 104 degrees of freedom (5 observations deleted due to missingness) Multiple R-squared: 0.9999, Adjusted R-squared: 0.9999 F-statistic: 1.81e+05 on 5 and 104 DF, p-value: < 2.2e-16 > rm(temp, activ) > > > > graphics::par(get("par.postscript", pos = 'CheckExEnv')) > cleanEx() > nameEx("beav2") > ### * beav2 > > flush(stderr()); flush(stdout()) > > ### Name: beav2 > ### Title: Body Temperature Series of Beaver 2 > ### Aliases: beav2 > ### Keywords: datasets > > ### ** Examples > > attach(beav2) > beav2$hours <- 24*(day-307) + trunc(time/100) + (time%%100)/60 > plot(beav2$hours, beav2$temp, type = "l", xlab = "time", + ylab = "temperature", main = "Beaver 2") > usr <- par("usr"); usr[3:4] <- c(-0.2, 8); par(usr = usr) > lines(beav2$hours, beav2$activ, type = "s", lty = 2) > > temp <- ts(temp, start = 8+2/3, frequency = 6) > activ <- ts(activ, start = 8+2/3, frequency = 6) > acf(temp[activ == 0]); acf(temp[activ == 1]) # also look at PACFs > ar(temp[activ == 0]); ar(temp[activ == 1]) Call: ar(x = temp[activ == 0]) Coefficients: 1 0.7392 Order selected 1 sigma^2 estimated as 0.02011 Call: ar(x = temp[activ == 1]) Coefficients: 1 0.7894 Order selected 1 sigma^2 estimated as 0.01792 > > arima(temp, order = c(1,0,0), xreg = activ) Call: arima(x = temp, order = c(1, 0, 0), xreg = activ) Coefficients: ar1 intercept activ 0.8733 37.1920 0.6139 s.e. 0.0684 0.1187 0.1381 sigma^2 estimated as 0.01518: log likelihood = 66.78, aic = -125.55 > dreg <- cbind(sin = sin(2*pi*beav2$hours/24), cos = cos(2*pi*beav2$hours/24)) > arima(temp, order = c(1,0,0), xreg = cbind(active=activ, dreg)) Call: arima(x = temp, order = c(1, 0, 0), xreg = cbind(active = activ, dreg)) Coefficients: ar1 intercept active dreg.sin dreg.cos 0.7905 37.1674 0.5322 -0.282 0.1201 s.e. 
0.0681 0.0939 0.1282 0.105 0.0997 sigma^2 estimated as 0.01434: log likelihood = 69.83, aic = -127.67 > > ## IGNORE_RDIFF_BEGIN > library(nlme) # for gls and corAR1 > beav2.gls <- gls(temp ~ activ, data = beav2, corr = corAR1(0.8), + method = "ML") > summary(beav2.gls) Generalized least squares fit by maximum likelihood Model: temp ~ activ Data: beav2 AIC BIC logLik -125.5505 -115.1298 66.77523 Correlation Structure: AR(1) Formula: ~1 Parameter estimate(s): Phi 0.8731771 Coefficients: Value Std.Error t-value p-value (Intercept) 37.19195 0.1131328 328.7460 0 activ 0.61418 0.1087286 5.6487 0 Correlation: (Intr) activ -0.582 Standardized residuals: Min Q1 Med Q3 Max -2.42080780 -0.61510520 -0.03573836 0.81641138 2.15153499 Residual standard error: 0.2527856 Degrees of freedom: 100 total; 98 residual > summary(update(beav2.gls, subset = 6:100)) Generalized least squares fit by maximum likelihood Model: temp ~ activ Data: beav2 Subset: 6:100 AIC BIC logLik -124.981 -114.7654 66.49048 Correlation Structure: AR(1) Formula: ~1 Parameter estimate(s): Phi 0.8380448 Coefficients: Value Std.Error t-value p-value (Intercept) 37.25001 0.09634047 386.6496 0 activ 0.60277 0.09931904 6.0690 0 Correlation: (Intr) activ -0.657 Standardized residuals: Min Q1 Med Q3 Max -2.0231494 -0.8910348 -0.1497564 0.7640939 2.2719468 Residual standard error: 0.2188542 Degrees of freedom: 95 total; 93 residual > detach("beav2"); rm(temp, activ) > ## IGNORE_RDIFF_END > > > > graphics::par(get("par.postscript", pos = 'CheckExEnv')) > cleanEx() detaching ‘package:nlme’ > nameEx("birthwt") > ### * birthwt > > flush(stderr()); flush(stdout()) > > ### Name: birthwt > ### Title: Risk Factors Associated with Low Infant Birth Weight > ### Aliases: birthwt > ### Keywords: datasets > > ### ** Examples > > bwt <- with(birthwt, { + race <- factor(race, labels = c("white", "black", "other")) + ptd <- factor(ptl > 0) + ftv <- factor(ftv) + levels(ftv)[-(1:2)] <- "2+" + data.frame(low = factor(low), age, lwt, 
race, smoke = (smoke > 0), + ptd, ht = (ht > 0), ui = (ui > 0), ftv) + }) > options(contrasts = c("contr.treatment", "contr.poly")) > glm(low ~ ., binomial, bwt) Call: glm(formula = low ~ ., family = binomial, data = bwt) Coefficients: (Intercept) age lwt raceblack raceother smokeTRUE 0.82302 -0.03723 -0.01565 1.19241 0.74068 0.75553 ptdTRUE htTRUE uiTRUE ftv1 ftv2+ 1.34376 1.91317 0.68020 -0.43638 0.17901 Degrees of Freedom: 188 Total (i.e. Null); 178 Residual Null Deviance: 234.7 Residual Deviance: 195.5 AIC: 217.5 > > > > base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly")) > cleanEx() > nameEx("boxcox") > ### * boxcox > > flush(stderr()); flush(stdout()) > > ### Name: boxcox > ### Title: Box-Cox Transformations for Linear Models > ### Aliases: boxcox boxcox.default boxcox.formula boxcox.lm > ### Keywords: regression models hplot > > ### ** Examples > > boxcox(Volume ~ log(Height) + log(Girth), data = trees, + lambda = seq(-0.25, 0.25, length = 10)) > > boxcox(Days+1 ~ Eth*Sex*Age*Lrn, data = quine, + lambda = seq(-0.05, 0.45, len = 20)) > > > > cleanEx() > nameEx("caith") > ### * caith > > flush(stderr()); flush(stdout()) > > ### Name: caith > ### Title: Colours of Eyes and Hair of People in Caithness > ### Aliases: caith > ### Keywords: datasets > > ### ** Examples > > corresp(caith) First canonical correlation(s): 0.4463684 Row scores: blue light medium dark -0.89679252 -0.98731818 0.07530627 1.57434710 Column scores: fair red medium dark black -1.21871379 -0.52257500 -0.09414671 1.31888486 2.45176017 > dimnames(caith)[[2]] <- c("F", "R", "M", "D", "B") > par(mfcol=c(1,3)) > plot(corresp(caith, nf=2)); title("symmetric") > plot(corresp(caith, nf=2), type="rows"); title("rows") > plot(corresp(caith, nf=2), type="col"); title("columns") > par(mfrow=c(1,1)) > > > > graphics::par(get("par.postscript", pos = 'CheckExEnv')) > cleanEx() > nameEx("cement") > ### * cement > > flush(stderr()); flush(stdout()) > > ### Name: cement > ### 
Title: Heat Evolved by Setting Cements > ### Aliases: cement > ### Keywords: datasets > > ### ** Examples > > lm(y ~ x1 + x2 + x3 + x4, cement) Call: lm(formula = y ~ x1 + x2 + x3 + x4, data = cement) Coefficients: (Intercept) x1 x2 x3 x4 62.4054 1.5511 0.5102 0.1019 -0.1441 > > > > cleanEx() > nameEx("confint") > ### * confint > > flush(stderr()); flush(stdout()) > > ### Name: confint-MASS > ### Title: Confidence Intervals for Model Parameters > ### Aliases: confint.glm confint.nls confint.profile.glm > ### confint.profile.nls > ### Keywords: models > > ### ** Examples > > expn1 <- deriv(y ~ b0 + b1 * 2^(-x/th), c("b0", "b1", "th"), + function(b0, b1, th, x) {}) > > wtloss.gr <- nls(Weight ~ expn1(b0, b1, th, Days), + data = wtloss, start = c(b0=90, b1=95, th=120)) > > expn2 <- deriv(~b0 + b1*((w0 - b0)/b1)^(x/d0), + c("b0","b1","d0"), function(b0, b1, d0, x, w0) {}) > > wtloss.init <- function(obj, w0) { + p <- coef(obj) + d0 <- - log((w0 - p["b0"])/p["b1"])/log(2) * p["th"] + c(p[c("b0", "b1")], d0 = as.vector(d0)) + } > > out <- NULL > w0s <- c(110, 100, 90) > for(w0 in w0s) { + fm <- nls(Weight ~ expn2(b0, b1, d0, Days, w0), + wtloss, start = wtloss.init(wtloss.gr, w0)) + out <- rbind(out, c(coef(fm)["d0"], confint(fm, "d0"))) + } Waiting for profiling to be done... Waiting for profiling to be done... Waiting for profiling to be done... > dimnames(out) <- list(paste(w0s, "kg:"), c("d0", "low", "high")) > out d0 low high 110 kg: 261.5132 256.2303 267.5009 100 kg: 349.4979 334.7293 368.0151 90 kg: 507.0941 457.2637 594.8745 > > ldose <- rep(0:5, 2) > numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16) > sex <- factor(rep(c("M", "F"), c(6, 6))) > SF <- cbind(numdead, numalive = 20 - numdead) > budworm.lg0 <- glm(SF ~ sex + ldose - 1, family = binomial) > confint(budworm.lg0) Waiting for profiling to be done... 
2.5 % 97.5 % sexF -4.4581438 -2.613610 sexM -3.1728745 -1.655117 ldose 0.8228708 1.339058 > confint(budworm.lg0, "ldose") Waiting for profiling to be done... 2.5 % 97.5 % 0.8228708 1.3390581 > > > > cleanEx() > nameEx("contr.sdif") > ### * contr.sdif > > flush(stderr()); flush(stdout()) > > ### Name: contr.sdif > ### Title: Successive Differences Contrast Coding > ### Aliases: contr.sdif > ### Keywords: models > > ### ** Examples > > (A <- contr.sdif(6)) 2-1 3-2 4-3 5-4 6-5 1 -0.8333333 -0.6666667 -0.5 -0.3333333 -0.1666667 2 0.1666667 -0.6666667 -0.5 -0.3333333 -0.1666667 3 0.1666667 0.3333333 -0.5 -0.3333333 -0.1666667 4 0.1666667 0.3333333 0.5 -0.3333333 -0.1666667 5 0.1666667 0.3333333 0.5 0.6666667 -0.1666667 6 0.1666667 0.3333333 0.5 0.6666667 0.8333333 > zapsmall(ginv(A)) [,1] [,2] [,3] [,4] [,5] [,6] [1,] -1 1 0 0 0 0 [2,] 0 -1 1 0 0 0 [3,] 0 0 -1 1 0 0 [4,] 0 0 0 -1 1 0 [5,] 0 0 0 0 -1 1 > > > > cleanEx() > nameEx("corresp") > ### * corresp > > flush(stderr()); flush(stdout()) > > ### Name: corresp > ### Title: Simple Correspondence Analysis > ### Aliases: corresp corresp.xtabs corresp.data.frame corresp.default > ### corresp.factor corresp.formula corresp.matrix > ### Keywords: category multivariate > > ### ** Examples > > (ct <- corresp(~ Age + Eth, data = quine)) First canonical correlation(s): 0.05317534 Age scores: F0 F1 F2 F3 -0.3344445 1.4246090 -1.0320002 -0.4612728 Eth scores: A N -1.0563816 0.9466276 > plot(ct) > > corresp(caith) First canonical correlation(s): 0.4463684 Row scores: blue light medium dark -0.89679252 -0.98731818 0.07530627 1.57434710 Column scores: fair red medium dark black -1.21871379 -0.52257500 -0.09414671 1.31888486 2.45176017 > biplot(corresp(caith, nf = 2)) > > > > cleanEx() > nameEx("cov.rob") > ### * cov.rob > > flush(stderr()); flush(stdout()) > > ### Name: cov.rob > ### Title: Resistant Estimation of Multivariate Location and Scatter > ### Aliases: cov.rob cov.mve cov.mcd > ### Keywords: robust multivariate > > ### ** 
Examples > > set.seed(123) > cov.rob(stackloss) $center Air.Flow Water.Temp Acid.Conc. stack.loss 56.3750 20.0000 85.4375 13.0625 $cov Air.Flow Water.Temp Acid.Conc. stack.loss Air.Flow 23.050000 6.666667 16.625000 19.308333 Water.Temp 6.666667 5.733333 5.333333 7.733333 Acid.Conc. 16.625000 5.333333 34.395833 13.837500 stack.loss 19.308333 7.733333 13.837500 18.462500 $msg [1] "20 singular samples of size 5 out of 2500" $crit [1] 19.89056 $best [1] 5 6 7 8 9 10 11 12 15 16 18 19 20 $n.obs [1] 21 > cov.rob(stack.x, method = "mcd", nsamp = "exact") $center Air.Flow Water.Temp Acid.Conc. 56.70588 20.23529 85.52941 $cov Air.Flow Water.Temp Acid.Conc. Air.Flow 23.470588 7.573529 16.102941 Water.Temp 7.573529 6.316176 5.367647 Acid.Conc. 16.102941 5.367647 32.389706 $msg [1] "266 singular samples of size 4 out of 5985" $crit [1] 5.472581 $best [1] 4 5 6 7 8 9 10 11 12 13 14 20 $n.obs [1] 21 > > > > cleanEx() > nameEx("cov.trob") > ### * cov.trob > > flush(stderr()); flush(stdout()) > > ### Name: cov.trob > ### Title: Covariance Estimation for Multivariate t Distribution > ### Aliases: cov.trob > ### Keywords: multivariate > > ### ** Examples > > cov.trob(stackloss) $cov Air.Flow Water.Temp Acid.Conc. stack.loss Air.Flow 60.47035 17.027203 18.554452 62.28032 Water.Temp 17.02720 8.085857 5.604132 20.50469 Acid.Conc. 18.55445 5.604132 24.404633 16.91085 stack.loss 62.28032 20.504687 16.910855 72.80743 $center Air.Flow Water.Temp Acid.Conc. 
stack.loss 58.96905 20.79263 86.05588 16.09028 $n.obs [1] 21 $call cov.trob(x = stackloss) $iter [1] 5 > > > > cleanEx() > nameEx("denumerate") > ### * denumerate > > flush(stderr()); flush(stdout()) > > ### Name: denumerate > ### Title: Transform an Allowable Formula for 'loglm' into one for 'terms' > ### Aliases: denumerate denumerate.formula > ### Keywords: models > > ### ** Examples > > denumerate(~(1+2+3)^3 + a/b) ~(.v1 + .v2 + .v3)^3 + a/b > ## which gives ~ (.v1 + .v2 + .v3)^3 + a/b > > > > cleanEx() > nameEx("dose.p") > ### * dose.p > > flush(stderr()); flush(stdout()) > > ### Name: dose.p > ### Title: Predict Doses for Binomial Assay model > ### Aliases: dose.p print.glm.dose > ### Keywords: regression models > > ### ** Examples > > ldose <- rep(0:5, 2) > numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16) > sex <- factor(rep(c("M", "F"), c(6, 6))) > SF <- cbind(numdead, numalive = 20 - numdead) > budworm.lg0 <- glm(SF ~ sex + ldose - 1, family = binomial) > > dose.p(budworm.lg0, cf = c(1,3), p = 1:3/4) Dose SE p = 0.25: 2.231265 0.2499089 p = 0.50: 3.263587 0.2297539 p = 0.75: 4.295910 0.2746874 > dose.p(update(budworm.lg0, family = binomial(link=probit)), + cf = c(1,3), p = 1:3/4) Dose SE p = 0.25: 2.191229 0.2384478 p = 0.50: 3.257703 0.2240685 p = 0.75: 4.324177 0.2668745 > > > > cleanEx() > nameEx("dropterm") > ### * dropterm > > flush(stderr()); flush(stdout()) > > ### Name: dropterm > ### Title: Try All One-Term Deletions from a Model > ### Aliases: dropterm dropterm.default dropterm.glm dropterm.lm > ### Keywords: models > > ### ** Examples > > quine.hi <- aov(log(Days + 2.5) ~ .^4, quine) > quine.nxt <- update(quine.hi, . ~ . 
- Eth:Sex:Age:Lrn) > dropterm(quine.nxt, test= "F") Single term deletions Model: log(Days + 2.5) ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Age + Eth:Lrn + Sex:Age + Sex:Lrn + Age:Lrn + Eth:Sex:Age + Eth:Sex:Lrn + Eth:Age:Lrn + Sex:Age:Lrn Df Sum of Sq RSS AIC F Value Pr(F) 64.099 -68.184 Eth:Sex:Age 3 0.97387 65.073 -71.982 0.60773 0.61125 Eth:Sex:Lrn 1 1.57879 65.678 -66.631 2.95567 0.08816 . Eth:Age:Lrn 2 2.12841 66.227 -67.415 1.99230 0.14087 Sex:Age:Lrn 2 1.46623 65.565 -68.882 1.37247 0.25743 --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > quine.stp <- stepAIC(quine.nxt, + scope = list(upper = ~Eth*Sex*Age*Lrn, lower = ~1), + trace = FALSE) > dropterm(quine.stp, test = "F") Single term deletions Model: log(Days + 2.5) ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Age + Eth:Lrn + Sex:Age + Sex:Lrn + Age:Lrn + Eth:Sex:Lrn + Eth:Age:Lrn Df Sum of Sq RSS AIC F Value Pr(F) 66.600 -72.597 Sex:Age 3 10.7959 77.396 -56.663 6.7542 0.0002933 *** Eth:Sex:Lrn 1 3.0325 69.632 -68.096 5.6916 0.0185476 * Eth:Age:Lrn 2 2.0960 68.696 -72.072 1.9670 0.1441822 --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > quine.3 <- update(quine.stp, . ~ . - Eth:Age:Lrn) > dropterm(quine.3, test = "F") Single term deletions Model: log(Days + 2.5) ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Age + Eth:Lrn + Sex:Age + Sex:Lrn + Age:Lrn + Eth:Sex:Lrn Df Sum of Sq RSS AIC F Value Pr(F) 68.696 -72.072 Eth:Age 3 3.0312 71.727 -71.768 1.8679 0.1383323 Sex:Age 3 11.4272 80.123 -55.607 7.0419 0.0002037 *** Age:Lrn 2 2.8149 71.511 -70.209 2.6020 0.0780701 . Eth:Sex:Lrn 1 4.6956 73.391 -64.419 8.6809 0.0038268 ** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > quine.4 <- update(quine.3, . ~ . 
- Eth:Age) > dropterm(quine.4, test = "F") Single term deletions Model: log(Days + 2.5) ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Lrn + Sex:Age + Sex:Lrn + Age:Lrn + Eth:Sex:Lrn Df Sum of Sq RSS AIC F Value Pr(F) 71.727 -71.768 Sex:Age 3 11.5656 83.292 -55.942 6.9873 0.0002147 *** Age:Lrn 2 2.9118 74.639 -69.959 2.6387 0.0752793 . Eth:Sex:Lrn 1 6.8181 78.545 -60.511 12.3574 0.0006052 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > quine.5 <- update(quine.4, . ~ . - Age:Lrn) > dropterm(quine.5, test = "F") Single term deletions Model: log(Days + 2.5) ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Lrn + Sex:Age + Sex:Lrn + Eth:Sex:Lrn Df Sum of Sq RSS AIC F Value Pr(F) 74.639 -69.959 Sex:Age 3 9.9002 84.539 -57.774 5.8362 0.0008944 *** Eth:Sex:Lrn 1 6.2988 80.937 -60.130 11.1396 0.0010982 ** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > house.glm0 <- glm(Freq ~ Infl*Type*Cont + Sat, family=poisson, + data = housing) > house.glm1 <- update(house.glm0, . ~ . + Sat*(Infl+Type+Cont)) > dropterm(house.glm1, test = "Chisq") Single term deletions Model: Freq ~ Infl + Type + Cont + Sat + Infl:Type + Infl:Cont + Type:Cont + Infl:Sat + Type:Sat + Cont:Sat + Infl:Type:Cont Df Deviance AIC LRT Pr(Chi) 38.662 455.63 Infl:Sat 4 147.780 556.75 109.117 < 2.2e-16 *** Type:Sat 6 100.889 505.86 62.227 1.586e-11 *** Cont:Sat 2 54.722 467.69 16.060 0.0003256 *** Infl:Type:Cont 6 43.952 448.92 5.290 0.5072454 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > > > cleanEx() > nameEx("eagles") > ### * eagles > > flush(stderr()); flush(stdout()) > > ### Name: eagles > ### Title: Foraging Ecology of Bald Eagles > ### Aliases: eagles > ### Keywords: datasets > > ### ** Examples > > eagles.glm <- glm(cbind(y, n - y) ~ P*A + V, data = eagles, + family = binomial) > dropterm(eagles.glm) Single term deletions Model: cbind(y, n - y) ~ P * A + V Df Deviance AIC 0.333 23.073 V 1 53.737 74.478 P:A 1 6.956 27.696 > prof <- profile(eagles.glm) > plot(prof) > pairs(prof) > > > > cleanEx() > nameEx("epil") > ### * epil > > flush(stderr()); flush(stdout()) > > ### Name: epil > ### Title: Seizure Counts for Epileptics > ### Aliases: epil > ### Keywords: datasets > > ### ** Examples > > summary(glm(y ~ lbase*trt + lage + V4, family = poisson, + data = epil), cor = FALSE) Call: glm(formula = y ~ lbase * trt + lage + V4, family = poisson, data = epil) Deviance Residuals: Min 1Q Median 3Q Max -5.0915 -1.4126 -0.2739 0.7580 10.7711 Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) 1.89791 0.04260 44.552 < 2e-16 *** lbase 0.94862 0.04360 21.759 < 2e-16 *** trtprogabide -0.34588 0.06100 -5.670 1.42e-08 *** lage 0.88760 0.11650 7.619 2.56e-14 *** V4 -0.15977 0.05458 -2.927 0.00342 ** lbase:trtprogabide 0.56154 0.06352 8.841 < 2e-16 *** --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for poisson family taken to be 1) Null deviance: 2517.83 on 235 degrees of freedom Residual deviance: 869.07 on 230 degrees of freedom AIC: 1647 Number of Fisher Scoring iterations: 5 > epil2 <- epil[epil$period == 1, ] > epil2["period"] <- rep(0, 59); epil2["y"] <- epil2["base"] > epil["time"] <- 1; epil2["time"] <- 4 > epil2 <- rbind(epil, epil2) > epil2$pred <- unclass(epil2$trt) * (epil2$period > 0) > epil2$subject <- factor(epil2$subject) > epil3 <- aggregate(epil2, list(epil2$subject, epil2$period > 0), + function(x) if(is.numeric(x)) sum(x) else x[1]) > epil3$pred <- factor(epil3$pred, + labels = c("base", "placebo", "drug")) > > contrasts(epil3$pred) <- structure(contr.sdif(3), + dimnames = list(NULL, c("placebo-base", "drug-placebo"))) > ## IGNORE_RDIFF_BEGIN > summary(glm(y ~ pred + factor(subject) + offset(log(time)), + family = poisson, data = epil3), cor = FALSE) Call: glm(formula = y ~ pred + factor(subject) + offset(log(time)), family = poisson, data = epil3) Deviance Residuals: Min 1Q Median 3Q Max -5.2928 -0.7350 0.0000 0.6997 4.7145 Coefficients: Estimate Std. 
Error z value Pr(>|z|) (Intercept) 1.122e+00 2.008e-01 5.590 2.28e-08 *** predplacebo-base 1.087e-01 4.691e-02 2.318 0.020474 * preddrug-placebo -1.016e-01 6.507e-02 -1.561 0.118431 factor(subject)2 -2.300e-15 2.828e-01 0.000 1.000000 factor(subject)3 -3.857e-01 3.144e-01 -1.227 0.219894 factor(subject)4 -1.744e-01 2.960e-01 -0.589 0.555847 factor(subject)5 1.577e+00 2.197e-01 7.178 7.08e-13 *** factor(subject)6 6.729e-01 2.458e-01 2.738 0.006182 ** factor(subject)7 -4.082e-02 2.858e-01 -0.143 0.886411 factor(subject)8 1.758e+00 2.166e-01 8.117 4.77e-16 *** factor(subject)9 5.878e-01 2.494e-01 2.356 0.018454 * factor(subject)10 5.423e-01 2.515e-01 2.156 0.031060 * factor(subject)11 1.552e+00 2.202e-01 7.048 1.81e-12 *** factor(subject)12 9.243e-01 2.364e-01 3.910 9.22e-05 *** factor(subject)13 3.075e-01 2.635e-01 1.167 0.243171 factor(subject)14 1.212e+00 2.278e-01 5.320 1.04e-07 *** factor(subject)15 1.765e+00 2.164e-01 8.153 3.54e-16 *** factor(subject)16 9.708e-01 2.348e-01 4.134 3.57e-05 *** factor(subject)17 -4.082e-02 2.858e-01 -0.143 0.886411 factor(subject)18 2.236e+00 2.104e-01 10.629 < 2e-16 *** factor(subject)19 2.776e-01 2.651e-01 1.047 0.295060 factor(subject)20 3.646e-01 2.603e-01 1.401 0.161324 factor(subject)21 3.922e-02 2.801e-01 0.140 0.888645 factor(subject)22 -8.338e-02 2.889e-01 -0.289 0.772894 factor(subject)23 1.823e-01 2.708e-01 0.673 0.500777 factor(subject)24 8.416e-01 2.393e-01 3.517 0.000436 *** factor(subject)25 2.069e+00 2.123e-01 9.750 < 2e-16 *** factor(subject)26 -5.108e-01 3.266e-01 -1.564 0.117799 factor(subject)27 -2.231e-01 3.000e-01 -0.744 0.456990 factor(subject)28 1.386e+00 2.236e-01 6.200 5.66e-10 *** factor(subject)29 1.604e+00 2.227e-01 7.203 5.90e-13 *** factor(subject)30 1.023e+00 2.372e-01 4.313 1.61e-05 *** factor(subject)31 9.149e-02 2.821e-01 0.324 0.745700 factor(subject)32 -3.111e-02 2.909e-01 -0.107 0.914822 factor(subject)33 4.710e-01 2.597e-01 1.814 0.069736 . 
factor(subject)34 3.887e-01 2.640e-01 1.473 0.140879 factor(subject)35 1.487e+00 2.250e-01 6.609 3.87e-11 *** factor(subject)36 3.598e-01 2.656e-01 1.355 0.175551 factor(subject)37 -1.221e-01 2.979e-01 -0.410 0.681943 factor(subject)38 1.344e+00 2.283e-01 5.889 3.90e-09 *** factor(subject)39 1.082e+00 2.354e-01 4.596 4.30e-06 *** factor(subject)40 -7.687e-01 3.634e-01 -2.116 0.034384 * factor(subject)41 1.656e-01 2.772e-01 0.597 0.550234 factor(subject)42 5.227e-02 2.848e-01 0.184 0.854388 factor(subject)43 1.543e+00 2.239e-01 6.891 5.54e-12 *** factor(subject)44 9.605e-01 2.393e-01 4.014 5.96e-05 *** factor(subject)45 1.177e+00 2.326e-01 5.061 4.18e-07 *** factor(subject)46 -5.275e-01 3.355e-01 -1.572 0.115840 factor(subject)47 1.053e+00 2.363e-01 4.456 8.35e-06 *** factor(subject)48 -5.275e-01 3.355e-01 -1.572 0.115840 factor(subject)49 2.949e+00 2.082e-01 14.168 < 2e-16 *** factor(subject)50 3.887e-01 2.640e-01 1.473 0.140879 factor(subject)51 1.038e+00 2.367e-01 4.385 1.16e-05 *** factor(subject)52 5.711e-01 2.548e-01 2.241 0.025023 * factor(subject)53 1.670e+00 2.215e-01 7.538 4.76e-14 *** factor(subject)54 4.443e-01 2.611e-01 1.702 0.088759 . factor(subject)55 2.674e-01 2.709e-01 0.987 0.323618 factor(subject)56 1.124e+00 2.341e-01 4.800 1.59e-06 *** factor(subject)57 2.674e-01 2.709e-01 0.987 0.323618 factor(subject)58 -6.017e-01 3.436e-01 -1.751 0.079911 . factor(subject)59 -7.556e-02 2.942e-01 -0.257 0.797331 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for poisson family taken to be 1) Null deviance: 3180.82 on 117 degrees of freedom Residual deviance: 303.16 on 57 degrees of freedom AIC: 1003.5 Number of Fisher Scoring iterations: 5 > ## IGNORE_RDIFF_END > > summary(glmmPQL(y ~ lbase*trt + lage + V4, + random = ~ 1 | subject, + family = poisson, data = epil)) iteration 1 iteration 2 iteration 3 iteration 4 iteration 5 Linear mixed-effects model fit by maximum likelihood Data: epil AIC BIC logLik NA NA NA Random effects: Formula: ~1 | subject (Intercept) Residual StdDev: 0.4442704 1.400807 Variance function: Structure: fixed weights Formula: ~invwt Fixed effects: y ~ lbase * trt + lage + V4 Value Std.Error DF t-value p-value (Intercept) 1.8696677 0.1055620 176 17.711554 0.0000 lbase 0.8818228 0.1292834 54 6.820849 0.0000 trtprogabide -0.3095253 0.1490438 54 -2.076740 0.0426 lage 0.5335460 0.3463119 54 1.540652 0.1292 V4 -0.1597696 0.0774521 176 -2.062819 0.0406 lbase:trtprogabide 0.3415425 0.2033325 54 1.679725 0.0988 Correlation: (Intr) lbase trtprg lage V4 lbase -0.126 trtprogabide -0.691 0.089 lage -0.103 -0.038 0.088 V4 -0.162 0.000 0.000 0.000 lbase:trtprogabide 0.055 -0.645 -0.184 0.267 0.000 Standardized Within-Group Residuals: Min Q1 Med Q3 Max -2.13240534 -0.63871136 -0.08486339 0.41960195 4.97872138 Number of Observations: 236 Number of Groups: 59 > summary(glmmPQL(y ~ pred, random = ~1 | subject, + family = poisson, data = epil3)) iteration 1 iteration 2 iteration 3 iteration 4 iteration 5 iteration 6 iteration 7 iteration 8 Linear mixed-effects model fit by maximum likelihood Data: epil3 AIC BIC logLik NA NA NA Random effects: Formula: ~1 | subject (Intercept) Residual StdDev: 0.7257895 2.16629 Variance function: Structure: fixed weights Formula: ~invwt Fixed effects: y ~ pred Value Std.Error DF t-value p-value (Intercept) 3.213631 0.10569117 58 30.405865 0.0000 predplacebo-base 0.110855 0.09989089 57 1.109763 0.2718 
preddrug-placebo -0.105613 0.13480483 57 -0.783450 0.4366 Correlation: (Intr) prdpl- predplacebo-base 0.081 preddrug-placebo -0.010 -0.700 Standardized Within-Group Residuals: Min Q1 Med Q3 Max -2.0446864 -0.4765135 -0.1975651 0.3145761 2.6532834 Number of Observations: 118 Number of Groups: 59 > > > > cleanEx() > nameEx("farms") > ### * farms > > flush(stderr()); flush(stdout()) > > ### Name: farms > ### Title: Ecological Factors in Farm Management > ### Aliases: farms > ### Keywords: datasets > > ### ** Examples > > farms.mca <- mca(farms, abbrev = TRUE) # Use levels as names > eqscplot(farms.mca$cs, type = "n") > text(farms.mca$rs, cex = 0.7) > text(farms.mca$cs, labels = dimnames(farms.mca$cs)[[1]], cex = 0.7) > > > > cleanEx() > nameEx("fitdistr") > ### * fitdistr > > flush(stderr()); flush(stdout()) > > ### Name: fitdistr > ### Title: Maximum-likelihood Fitting of Univariate Distributions > ### Aliases: fitdistr > ### Keywords: distribution htest > > ### ** Examples > > ## avoid spurious accuracy > op <- options(digits = 3) > set.seed(123) > x <- rgamma(100, shape = 5, rate = 0.1) > fitdistr(x, "gamma") shape rate 6.4870 0.1365 (0.8946) (0.0196) > ## now do this directly with more control. > fitdistr(x, dgamma, list(shape = 1, rate = 0.1), lower = 0.001) shape rate 6.4869 0.1365 (0.8944) (0.0196) > > set.seed(123) > x2 <- rt(250, df = 9) > fitdistr(x2, "t", df = 9) m s -0.0107 1.0441 ( 0.0722) ( 0.0543) > ## allow df to vary: not a very good idea! > fitdistr(x2, "t") Warning in dt((x - m)/s, df, log = TRUE) : NaNs produced m s df -0.00965 1.00617 6.62729 ( 0.07147) ( 0.07707) ( 2.71033) > ## now do fixed-df fit directly with more control. 
> mydt <- function(x, m, s, df) dt((x-m)/s, df)/s > fitdistr(x2, mydt, list(m = 0, s = 1), df = 9, lower = c(-Inf, 0)) m s -0.0107 1.0441 ( 0.0722) ( 0.0543) > > set.seed(123) > x3 <- rweibull(100, shape = 4, scale = 100) > fitdistr(x3, "weibull") shape scale 4.080 99.984 ( 0.313) ( 2.582) > > set.seed(123) > x4 <- rnegbin(500, mu = 5, theta = 4) > fitdistr(x4, "Negative Binomial") size mu 4.216 4.945 (0.504) (0.147) > options(op) > > > > cleanEx() > nameEx("fractions") > ### * fractions > > flush(stderr()); flush(stdout()) > > ### Name: fractions > ### Title: Rational Approximation > ### Aliases: fractions Math.fractions Ops.fractions Summary.fractions > ### [.fractions [<-.fractions as.character.fractions as.fractions > ### is.fractions print.fractions t.fractions > ### Keywords: math > > ### ** Examples > > X <- matrix(runif(25), 5, 5) > zapsmall(solve(X, X/5)) # print near-zeroes as zero [,1] [,2] [,3] [,4] [,5] [1,] 0.2 0.0 0.0 0.0 0.0 [2,] 0.0 0.2 0.0 0.0 0.0 [3,] 0.0 0.0 0.2 0.0 0.0 [4,] 0.0 0.0 0.0 0.2 0.0 [5,] 0.0 0.0 0.0 0.0 0.2 > fractions(solve(X, X/5)) [,1] [,2] [,3] [,4] [,5] [1,] 1/5 0 0 0 0 [2,] 0 1/5 0 0 0 [3,] 0 0 1/5 0 0 [4,] 0 0 0 1/5 0 [5,] 0 0 0 0 1/5 > fractions(solve(X, X/5)) + 1 [,1] [,2] [,3] [,4] [,5] [1,] 6/5 1 1 1 1 [2,] 1 6/5 1 1 1 [3,] 1 1 6/5 1 1 [4,] 1 1 1 6/5 1 [5,] 1 1 1 1 6/5 > > > > cleanEx() > nameEx("galaxies") > ### * galaxies > > flush(stderr()); flush(stdout()) > > ### Name: galaxies > ### Title: Velocities for 82 Galaxies > ### Aliases: galaxies > ### Keywords: datasets > > ### ** Examples > > gal <- galaxies/1000 > c(width.SJ(gal, method = "dpi"), width.SJ(gal)) [1] 3.256151 2.566423 > plot(x = c(0, 40), y = c(0, 0.3), type = "n", bty = "l", + xlab = "velocity of galaxy (1000km/s)", ylab = "density") > rug(gal) > lines(density(gal, width = 3.25, n = 200), lty = 1) > lines(density(gal, width = 2.56, n = 200), lty = 3) > > > > cleanEx() > nameEx("gamma.shape.glm") > ### * gamma.shape.glm > > flush(stderr()); flush(stdout()) 
> > ### Name: gamma.shape > ### Title: Estimate the Shape Parameter of the Gamma Distribution in a GLM > ### Fit > ### Aliases: gamma.shape gamma.shape.glm print.gamma.shape > ### Keywords: models > > ### ** Examples > > clotting <- data.frame( + u = c(5,10,15,20,30,40,60,80,100), + lot1 = c(118,58,42,35,27,25,21,19,18), + lot2 = c(69,35,26,21,18,16,13,12,12)) > clot1 <- glm(lot1 ~ log(u), data = clotting, family = Gamma) > gamma.shape(clot1) Alpha: 538.1315 SE: 253.5991 > > gm <- glm(Days + 0.1 ~ Age*Eth*Sex*Lrn, + quasi(link=log, variance="mu^2"), quine, + start = c(3, rep(0,31))) > gamma.shape(gm, verbose = TRUE) Initial estimate: 1.060344 Iter. 1 Alpha: 1.238408 Iter. 2 Alpha: 1.276997 Iter. 3 Alpha: 1.278343 Iter. 4 Alpha: 1.278345 Alpha: 1.2783449 SE: 0.1345175 > summary(gm, dispersion = gamma.dispersion(gm)) # better summary Call: glm(formula = Days + 0.1 ~ Age * Eth * Sex * Lrn, family = quasi(link = log, variance = "mu^2"), data = quine, start = c(3, rep(0, 31))) Deviance Residuals: Min 1Q Median 3Q Max -3.0385 -0.7164 -0.1532 0.3863 1.3087 Coefficients: (4 not defined because of singularities) Estimate Std. Error z value Pr(>|z|) (Intercept) 3.06105 0.44223 6.922 4.46e-12 *** AgeF1 -0.61870 0.59331 -1.043 0.297041 AgeF2 -2.31911 0.98885 -2.345 0.019014 * AgeF3 -0.37623 0.53149 -0.708 0.479020 EthN -0.13789 0.62540 -0.220 0.825496 SexM -0.48844 0.59331 -0.823 0.410369 LrnSL -1.92965 0.98885 -1.951 0.051009 . 
AgeF1:EthN 0.10249 0.82338 0.124 0.900942 AgeF2:EthN -0.50874 1.39845 -0.364 0.716017 AgeF3:EthN 0.06314 0.74584 0.085 0.932534 AgeF1:SexM 0.40695 0.94847 0.429 0.667884 AgeF2:SexM 3.06173 1.11626 2.743 0.006091 ** AgeF3:SexM 1.10841 0.74208 1.494 0.135267 EthN:SexM -0.74217 0.82338 -0.901 0.367394 AgeF1:LrnSL 2.60967 1.10114 2.370 0.017789 * AgeF2:LrnSL 4.78434 1.36304 3.510 0.000448 *** AgeF3:LrnSL NA NA NA NA EthN:LrnSL 2.22936 1.39845 1.594 0.110899 SexM:LrnSL 1.56531 1.18112 1.325 0.185077 AgeF1:EthN:SexM -0.30235 1.32176 -0.229 0.819065 AgeF2:EthN:SexM 0.29742 1.57035 0.189 0.849780 AgeF3:EthN:SexM 0.82215 1.03277 0.796 0.425995 AgeF1:EthN:LrnSL -3.50803 1.54655 -2.268 0.023311 * AgeF2:EthN:LrnSL -3.33529 1.92481 -1.733 0.083133 . AgeF3:EthN:LrnSL NA NA NA NA AgeF1:SexM:LrnSL -2.39791 1.51050 -1.587 0.112400 AgeF2:SexM:LrnSL -4.12161 1.60698 -2.565 0.010323 * AgeF3:SexM:LrnSL NA NA NA NA EthN:SexM:LrnSL -0.15305 1.66253 -0.092 0.926653 AgeF1:EthN:SexM:LrnSL 2.13480 2.08685 1.023 0.306317 AgeF2:EthN:SexM:LrnSL 2.11886 2.27882 0.930 0.352473 AgeF3:EthN:SexM:LrnSL NA NA NA NA --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for quasi family taken to be 0.7822615) Null deviance: 190.40 on 145 degrees of freedom Residual deviance: 128.36 on 118 degrees of freedom AIC: NA Number of Fisher Scoring iterations: 7 > > > > cleanEx() > nameEx("gehan") > ### * gehan > > flush(stderr()); flush(stdout()) > > ### Name: gehan > ### Title: Remission Times of Leukaemia Patients > ### Aliases: gehan > ### Keywords: datasets > > ### ** Examples > > library(survival) > gehan.surv <- survfit(Surv(time, cens) ~ treat, data = gehan, + conf.type = "log-log") > summary(gehan.surv) Call: survfit(formula = Surv(time, cens) ~ treat, data = gehan, conf.type = "log-log") treat=6-MP time n.risk n.event survival std.err lower 95% CI upper 95% CI 6 21 3 0.857 0.0764 0.620 0.952 7 17 1 0.807 0.0869 0.563 0.923 10 15 1 0.753 0.0963 0.503 0.889 13 12 1 0.690 0.1068 0.432 0.849 16 11 1 0.627 0.1141 0.368 0.805 22 7 1 0.538 0.1282 0.268 0.747 23 6 1 0.448 0.1346 0.188 0.680 treat=control time n.risk n.event survival std.err lower 95% CI upper 95% CI 1 21 2 0.9048 0.0641 0.67005 0.975 2 19 2 0.8095 0.0857 0.56891 0.924 3 17 1 0.7619 0.0929 0.51939 0.893 4 16 2 0.6667 0.1029 0.42535 0.825 5 14 2 0.5714 0.1080 0.33798 0.749 8 12 4 0.3810 0.1060 0.18307 0.578 11 8 2 0.2857 0.0986 0.11656 0.482 12 6 2 0.1905 0.0857 0.05948 0.377 15 4 1 0.1429 0.0764 0.03566 0.321 17 3 1 0.0952 0.0641 0.01626 0.261 22 2 1 0.0476 0.0465 0.00332 0.197 23 1 1 0.0000 NaN NA NA > survreg(Surv(time, cens) ~ factor(pair) + treat, gehan, dist = "exponential") Call: survreg(formula = Surv(time, cens) ~ factor(pair) + treat, data = gehan, dist = "exponential") Coefficients: (Intercept) factor(pair)2 factor(pair)3 factor(pair)4 factor(pair)5 2.0702861 2.1476909 1.8329493 1.7718527 1.4682566 factor(pair)6 factor(pair)7 factor(pair)8 factor(pair)9 factor(pair)10 1.8954775 0.5583010 2.5187140 2.2970513 2.4862208 factor(pair)11 factor(pair)12 factor(pair)13 factor(pair)14 
factor(pair)15 1.0524472 1.8270477 1.6772567 1.7778672 2.0859913 factor(pair)16 factor(pair)17 factor(pair)18 factor(pair)19 factor(pair)20 3.0634288 0.7996252 1.5855018 1.4083884 0.4023946 factor(pair)21 treatcontrol 1.9698390 -1.7671562 Scale fixed at 1 Loglik(model)= -101.6 Loglik(intercept only)= -116.8 Chisq= 30.27 on 21 degrees of freedom, p= 0.0866 n= 42 > summary(survreg(Surv(time, cens) ~ treat, gehan, dist = "exponential")) Call: survreg(formula = Surv(time, cens) ~ treat, data = gehan, dist = "exponential") Value Std. Error z p (Intercept) 3.686 0.333 11.06 < 2e-16 treatcontrol -1.527 0.398 -3.83 0.00013 Scale fixed at 1 Exponential distribution Loglik(model)= -108.5 Loglik(intercept only)= -116.8 Chisq= 16.49 on 1 degrees of freedom, p= 4.9e-05 Number of Newton-Raphson Iterations: 4 n= 42 > summary(survreg(Surv(time, cens) ~ treat, gehan)) Call: survreg(formula = Surv(time, cens) ~ treat, data = gehan) Value Std. Error z p (Intercept) 3.516 0.252 13.96 < 2e-16 treatcontrol -1.267 0.311 -4.08 4.5e-05 Log(scale) -0.312 0.147 -2.12 0.034 Scale= 0.732 Weibull distribution Loglik(model)= -106.6 Loglik(intercept only)= -116.4 Chisq= 19.65 on 1 degrees of freedom, p= 9.3e-06 Number of Newton-Raphson Iterations: 5 n= 42 > gehan.cox <- coxph(Surv(time, cens) ~ treat, gehan) > summary(gehan.cox) Call: coxph(formula = Surv(time, cens) ~ treat, data = gehan) n= 42, number of events= 30 coef exp(coef) se(coef) z Pr(>|z|) treatcontrol 1.5721 4.8169 0.4124 3.812 0.000138 *** --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 exp(coef) exp(-coef) lower .95 upper .95 treatcontrol 4.817 0.2076 2.147 10.81 Concordance= 0.69 (se = 0.041 ) Likelihood ratio test= 16.35 on 1 df, p=5e-05 Wald test = 14.53 on 1 df, p=1e-04 Score (logrank) test = 17.25 on 1 df, p=3e-05 > > > > cleanEx() detaching ‘package:survival’ > nameEx("glm.convert") > ### * glm.convert > > flush(stderr()); flush(stdout()) > > ### Name: glm.convert > ### Title: Change a Negative Binomial fit to a GLM fit > ### Aliases: glm.convert > ### Keywords: regression models > > ### ** Examples > > quine.nb1 <- glm.nb(Days ~ Sex/(Age + Eth*Lrn), data = quine) > quine.nbA <- glm.convert(quine.nb1) > quine.nbB <- update(quine.nb1, . ~ . + Sex:Age:Lrn) > anova(quine.nbA, quine.nbB) Analysis of Deviance Table Model 1: Days ~ Sex/(Age + Eth * Lrn) Model 2: Days ~ Sex + Sex:Age + Sex:Eth + Sex:Lrn + Sex:Eth:Lrn + Sex:Age:Lrn Resid. Df Resid. Dev Df Deviance 1 132 167.56 2 128 166.83 4 0.723 > > > > cleanEx() > nameEx("glm.nb") > ### * glm.nb > > flush(stderr()); flush(stdout()) > > ### Name: glm.nb > ### Title: Fit a Negative Binomial Generalized Linear Model > ### Aliases: glm.nb family.negbin logLik.negbin > ### Keywords: regression models > > ### ** Examples > > quine.nb1 <- glm.nb(Days ~ Sex/(Age + Eth*Lrn), data = quine) > quine.nb2 <- update(quine.nb1, . ~ . + Sex:Age:Lrn) > quine.nb3 <- update(quine.nb2, Days ~ .^4) > anova(quine.nb1, quine.nb2, quine.nb3) Likelihood ratio tests of Negative Binomial Models Response: Days Model 1 Sex/(Age + Eth * Lrn) 2 Sex + Sex:Age + Sex:Eth + Sex:Lrn + Sex:Eth:Lrn + Sex:Age:Lrn 3 Sex + Sex:Age + Sex:Eth + Sex:Lrn + Sex:Eth:Lrn + Sex:Age:Lrn + Sex:Age:Eth + Sex:Age:Eth:Lrn theta Resid. df 2 x log-lik. Test df LR stat. 
Pr(Chi) 1 1.597991 132 -1063.025 2 1.686899 128 -1055.398 1 vs 2 4 7.627279 0.10622602 3 1.928360 118 -1039.324 2 vs 3 10 16.073723 0.09754136 > ## Don't show: > ## PR#1695 > y <- c(7, 5, 4, 7, 5, 2, 11, 5, 5, 4, 2, 3, 4, 3, 5, 9, 6, 7, 10, 6, 12, + 6, 3, 5, 3, 9, 13, 0, 6, 1, 2, 0, 1, 0, 0, 4, 5, 1, 5, 3, 3, 4) > > lag1 <- c(0, 7, 5, 4, 7, 5, 2, 11, 5, 5, 4, 2, 3, 4, 3, 5, 9, 6, 7, 10, + 6, 12, 6, 3, 5, 3, 9, 13, 0, 6, 1, 2, 0, 1, 0, 0, 4, 5, 1, 5, 3, 3) > > lag2 <- c(0, 0, 7, 5, 4, 7, 5, 2, 11, 5, 5, 4, 2, 3, 4, 3, 5, 9, 6, 7, + 10, 6, 12, 6, 3, 5, 3, 9, 13, 0, 6, 1, 2, 0, 1, 0, 0, 4, 5, 1, 5, 3) > > lag3 <- c(0, 0, 0, 7, 5, 4, 7, 5, 2, 11, 5, 5, 4, 2, 3, 4, 3, 5, 9, 6, + 7, 10, 6, 12, 6, 3, 5, 3, 9, 13, 0, 6, 1, 2, 0, 1, 0, 0, 4, 5, 1, 5) > > (fit <- glm(y ~ lag1+lag2+lag3, family=poisson(link=identity), + start=c(2, 0.1, 0.1, 0.1))) Call: glm(formula = y ~ lag1 + lag2 + lag3, family = poisson(link = identity), start = c(2, 0.1, 0.1, 0.1)) Coefficients: (Intercept) lag1 lag2 lag3 2.6609 0.1573 0.1424 0.1458 Degrees of Freedom: 41 Total (i.e. Null); 38 Residual Null Deviance: 100.2 Residual Deviance: 90.34 AIC: 225.6 > try(glm.nb(y ~ lag1+lag2+lag3, link=identity)) Warning in log(y/mu) : NaNs produced Error : no valid set of coefficients has been found: please supply starting values > glm.nb(y ~ lag1+lag2+lag3, link=identity, start=c(2, 0.1, 0.1, 0.1)) Call: glm.nb(formula = y ~ lag1 + lag2 + lag3, start = c(2, 0.1, 0.1, 0.1), link = identity, init.theta = 4.406504429) Coefficients: (Intercept) lag1 lag2 lag3 2.6298 0.1774 0.1407 0.1346 Degrees of Freedom: 41 Total (i.e. Null); 38 Residual Null Deviance: 55.07 Residual Deviance: 50.09 AIC: 215.9 > glm.nb(y ~ lag1+lag2+lag3, link=identity, start=coef(fit)) Call: glm.nb(formula = y ~ lag1 + lag2 + lag3, start = coef(fit), link = identity, init.theta = 4.406504429) Coefficients: (Intercept) lag1 lag2 lag3 2.6298 0.1774 0.1407 0.1346 Degrees of Freedom: 41 Total (i.e. 
Null); 38 Residual Null Deviance: 55.07 Residual Deviance: 50.09 AIC: 215.9 > glm.nb(y ~ lag1+lag2+lag3, link=identity, etastart=rep(5, 42)) Call: glm.nb(formula = y ~ lag1 + lag2 + lag3, etastart = rep(5, 42), link = identity, init.theta = 4.406504429) Coefficients: (Intercept) lag1 lag2 lag3 2.6298 0.1774 0.1407 0.1346 Degrees of Freedom: 41 Total (i.e. Null); 38 Residual Null Deviance: 55.07 Residual Deviance: 50.09 AIC: 215.9 > ## End(Don't show) > > > cleanEx() > nameEx("glmmPQL") > ### * glmmPQL > > flush(stderr()); flush(stdout()) > > ### Name: glmmPQL > ### Title: Fit Generalized Linear Mixed Models via PQL > ### Aliases: glmmPQL > ### Keywords: models > > ### ** Examples > > library(nlme) # will be loaded automatically if omitted > summary(glmmPQL(y ~ trt + I(week > 2), random = ~ 1 | ID, + family = binomial, data = bacteria)) iteration 1 iteration 2 iteration 3 iteration 4 iteration 5 iteration 6 Linear mixed-effects model fit by maximum likelihood Data: bacteria AIC BIC logLik NA NA NA Random effects: Formula: ~1 | ID (Intercept) Residual StdDev: 1.410637 0.7800511 Variance function: Structure: fixed weights Formula: ~invwt Fixed effects: y ~ trt + I(week > 2) Value Std.Error DF t-value p-value (Intercept) 3.412014 0.5185033 169 6.580506 0.0000 trtdrug -1.247355 0.6440635 47 -1.936696 0.0588 trtdrug+ -0.754327 0.6453978 47 -1.168779 0.2484 I(week > 2)TRUE -1.607257 0.3583379 169 -4.485311 0.0000 Correlation: (Intr) trtdrg trtdr+ trtdrug -0.598 trtdrug+ -0.571 0.460 I(week > 2)TRUE -0.537 0.047 -0.001 Standardized Within-Group Residuals: Min Q1 Med Q3 Max -5.1985361 0.1572336 0.3513075 0.4949482 1.7448845 Number of Observations: 220 Number of Groups: 50 > ## Don't show: > # an example of offset > summary(glmmPQL(y ~ trt + week, random = ~ 1 | ID, + family = binomial, data = bacteria)) iteration 1 iteration 2 iteration 3 iteration 4 iteration 5 iteration 6 Linear mixed-effects model fit by maximum likelihood Data: bacteria AIC BIC logLik NA NA NA Random 
effects: Formula: ~1 | ID (Intercept) Residual StdDev: 1.325243 0.7903088 Variance function: Structure: fixed weights Formula: ~invwt Fixed effects: y ~ trt + week Value Std.Error DF t-value p-value (Intercept) 3.0302276 0.4791396 169 6.324310 0.0000 trtdrug -1.2176812 0.6160113 47 -1.976719 0.0540 trtdrug+ -0.7886376 0.6193895 47 -1.273250 0.2092 week -0.1446463 0.0392343 169 -3.686730 0.0003 Correlation: (Intr) trtdrg trtdr+ trtdrug -0.622 trtdrug+ -0.609 0.464 week -0.481 0.050 0.030 Standardized Within-Group Residuals: Min Q1 Med Q3 Max -4.2868074 0.2039043 0.3140333 0.5440835 1.9754065 Number of Observations: 220 Number of Groups: 50 > summary(glmmPQL(y ~ trt + week + offset(week), random = ~ 1 | ID, + family = binomial, data = bacteria)) iteration 1 iteration 2 iteration 3 iteration 4 iteration 5 iteration 6 Linear mixed-effects model fit by maximum likelihood Data: bacteria AIC BIC logLik NA NA NA Random effects: Formula: ~1 | ID (Intercept) Residual StdDev: 1.325243 0.7903088 Variance function: Structure: fixed weights Formula: ~invwt Fixed effects: y ~ trt + week + offset(week) Value Std.Error DF t-value p-value (Intercept) 3.0302276 0.4791396 169 6.324310 0.0000 trtdrug -1.2176812 0.6160113 47 -1.976719 0.0540 trtdrug+ -0.7886376 0.6193895 47 -1.273250 0.2092 week -1.1446463 0.0392343 169 -29.174622 0.0000 Correlation: (Intr) trtdrg trtdr+ trtdrug -0.622 trtdrug+ -0.609 0.464 week -0.481 0.050 0.030 Standardized Within-Group Residuals: Min Q1 Med Q3 Max -4.2868074 0.2039043 0.3140333 0.5440835 1.9754065 Number of Observations: 220 Number of Groups: 50 > ## End(Don't show) > > > cleanEx() detaching ‘package:nlme’ > nameEx("housing") > ### * housing > > flush(stderr()); flush(stdout()) > > ### Name: housing > ### Title: Frequency Table from a Copenhagen Housing Conditions Survey > ### Aliases: housing > ### Keywords: datasets > > ### ** Examples > > options(contrasts = c("contr.treatment", "contr.poly")) > > # Surrogate Poisson models > house.glm0 <- 
glm(Freq ~ Infl*Type*Cont + Sat, family = poisson, + data = housing) > ## IGNORE_RDIFF_BEGIN > summary(house.glm0, cor = FALSE) Call: glm(formula = Freq ~ Infl * Type * Cont + Sat, family = poisson, data = housing) Deviance Residuals: Min 1Q Median 3Q Max -4.5551 -1.0612 -0.0593 0.6483 4.1478 Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) 3.136e+00 1.196e-01 26.225 < 2e-16 *** InflMedium 2.733e-01 1.586e-01 1.723 0.084868 . InflHigh -2.054e-01 1.784e-01 -1.152 0.249511 TypeApartment 3.666e-01 1.555e-01 2.357 0.018403 * TypeAtrium -7.828e-01 2.134e-01 -3.668 0.000244 *** TypeTerrace -8.145e-01 2.157e-01 -3.775 0.000160 *** ContHigh -1.490e-15 1.690e-01 0.000 1.000000 Sat.L 1.159e-01 4.038e-02 2.871 0.004094 ** Sat.Q 2.629e-01 4.515e-02 5.824 5.76e-09 *** InflMedium:TypeApartment -1.177e-01 2.086e-01 -0.564 0.572571 InflHigh:TypeApartment 1.753e-01 2.279e-01 0.769 0.441783 InflMedium:TypeAtrium -4.068e-01 3.035e-01 -1.340 0.180118 InflHigh:TypeAtrium -1.692e-01 3.294e-01 -0.514 0.607433 InflMedium:TypeTerrace 6.292e-03 2.860e-01 0.022 0.982450 InflHigh:TypeTerrace -9.305e-02 3.280e-01 -0.284 0.776633 InflMedium:ContHigh -1.398e-01 2.279e-01 -0.613 0.539715 InflHigh:ContHigh -6.091e-01 2.800e-01 -2.176 0.029585 * TypeApartment:ContHigh 5.029e-01 2.109e-01 2.385 0.017083 * TypeAtrium:ContHigh 6.774e-01 2.751e-01 2.462 0.013811 * TypeTerrace:ContHigh 1.099e+00 2.675e-01 4.106 4.02e-05 *** InflMedium:TypeApartment:ContHigh 5.359e-02 2.862e-01 0.187 0.851450 InflHigh:TypeApartment:ContHigh 1.462e-01 3.380e-01 0.432 0.665390 InflMedium:TypeAtrium:ContHigh 1.555e-01 3.907e-01 0.398 0.690597 InflHigh:TypeAtrium:ContHigh 4.782e-01 4.441e-01 1.077 0.281619 InflMedium:TypeTerrace:ContHigh -4.980e-01 3.671e-01 -1.357 0.174827 InflHigh:TypeTerrace:ContHigh -4.470e-01 4.545e-01 -0.984 0.325326 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for poisson family taken to be 1) Null deviance: 833.66 on 71 degrees of freedom Residual deviance: 217.46 on 46 degrees of freedom AIC: 610.43 Number of Fisher Scoring iterations: 5 > ## IGNORE_RDIFF_END > > addterm(house.glm0, ~. + Sat:(Infl+Type+Cont), test = "Chisq") Single term additions Model: Freq ~ Infl * Type * Cont + Sat Df Deviance AIC LRT Pr(Chi) 217.46 610.43 Infl:Sat 4 111.08 512.05 106.371 < 2.2e-16 *** Type:Sat 6 156.79 561.76 60.669 3.292e-11 *** Cont:Sat 2 212.33 609.30 5.126 0.07708 . --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > house.glm1 <- update(house.glm0, . ~ . + Sat*(Infl+Type+Cont)) > summary(house.glm1, cor = FALSE) Call: glm(formula = Freq ~ Infl + Type + Cont + Sat + Infl:Type + Infl:Cont + Type:Cont + Infl:Sat + Type:Sat + Cont:Sat + Infl:Type:Cont, family = poisson, data = housing) Deviance Residuals: Min 1Q Median 3Q Max -1.6022 -0.5282 -0.0641 0.5757 1.9322 Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) 3.135074 0.120112 26.101 < 2e-16 *** InflMedium 0.248327 0.159979 1.552 0.120602 InflHigh -0.412645 0.184947 -2.231 0.025671 * TypeApartment 0.292524 0.157477 1.858 0.063231 . TypeAtrium -0.792847 0.214413 -3.698 0.000218 *** TypeTerrace -1.018074 0.221263 -4.601 4.20e-06 *** ContHigh -0.001407 0.169711 -0.008 0.993385 Sat.L -0.098106 0.112592 -0.871 0.383570 Sat.Q 0.285657 0.122283 2.336 0.019489 * InflMedium:TypeApartment -0.017882 0.210496 -0.085 0.932302 InflHigh:TypeApartment 0.386869 0.233297 1.658 0.097263 . 
InflMedium:TypeAtrium -0.360311 0.304979 -1.181 0.237432 InflHigh:TypeAtrium -0.036788 0.334793 -0.110 0.912503 InflMedium:TypeTerrace 0.185154 0.288892 0.641 0.521580 InflHigh:TypeTerrace 0.310749 0.334815 0.928 0.353345 InflMedium:ContHigh -0.200060 0.228748 -0.875 0.381799 InflHigh:ContHigh -0.725790 0.282352 -2.571 0.010155 * TypeApartment:ContHigh 0.569691 0.212152 2.685 0.007247 ** TypeAtrium:ContHigh 0.702115 0.276056 2.543 0.010979 * TypeTerrace:ContHigh 1.215930 0.269968 4.504 6.67e-06 *** InflMedium:Sat.L 0.519627 0.096830 5.366 8.03e-08 *** InflHigh:Sat.L 1.140302 0.118180 9.649 < 2e-16 *** InflMedium:Sat.Q -0.064474 0.102666 -0.628 0.530004 InflHigh:Sat.Q 0.115436 0.127798 0.903 0.366380 TypeApartment:Sat.L -0.520170 0.109793 -4.738 2.16e-06 *** TypeAtrium:Sat.L -0.288484 0.149551 -1.929 0.053730 . TypeTerrace:Sat.L -0.998666 0.141527 -7.056 1.71e-12 *** TypeApartment:Sat.Q 0.055418 0.118515 0.468 0.640068 TypeAtrium:Sat.Q -0.273820 0.149713 -1.829 0.067405 . TypeTerrace:Sat.Q -0.032328 0.149251 -0.217 0.828520 ContHigh:Sat.L 0.340703 0.087778 3.881 0.000104 *** ContHigh:Sat.Q -0.097929 0.094068 -1.041 0.297851 InflMedium:TypeApartment:ContHigh 0.046900 0.286212 0.164 0.869837 InflHigh:TypeApartment:ContHigh 0.126229 0.338208 0.373 0.708979 InflMedium:TypeAtrium:ContHigh 0.157239 0.390719 0.402 0.687364 InflHigh:TypeAtrium:ContHigh 0.478611 0.444244 1.077 0.281320 InflMedium:TypeTerrace:ContHigh -0.500162 0.367135 -1.362 0.173091 InflHigh:TypeTerrace:ContHigh -0.463099 0.454713 -1.018 0.308467 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for poisson family taken to be 1) Null deviance: 833.657 on 71 degrees of freedom Residual deviance: 38.662 on 34 degrees of freedom AIC: 455.63 Number of Fisher Scoring iterations: 4 > > 1 - pchisq(deviance(house.glm1), house.glm1$df.residual) [1] 0.2671363 > > dropterm(house.glm1, test = "Chisq") Single term deletions Model: Freq ~ Infl + Type + Cont + Sat + Infl:Type + Infl:Cont + Type:Cont + Infl:Sat + Type:Sat + Cont:Sat + Infl:Type:Cont Df Deviance AIC LRT Pr(Chi) 38.662 455.63 Infl:Sat 4 147.780 556.75 109.117 < 2.2e-16 *** Type:Sat 6 100.889 505.86 62.227 1.586e-11 *** Cont:Sat 2 54.722 467.69 16.060 0.0003256 *** Infl:Type:Cont 6 43.952 448.92 5.290 0.5072454 --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > addterm(house.glm1, ~. + Sat:(Infl+Type+Cont)^2, test = "Chisq") Single term additions Model: Freq ~ Infl + Type + Cont + Sat + Infl:Type + Infl:Cont + Type:Cont + Infl:Sat + Type:Sat + Cont:Sat + Infl:Type:Cont Df Deviance AIC LRT Pr(Chi) 38.662 455.63 Infl:Type:Sat 12 16.107 457.08 22.5550 0.03175 * Infl:Cont:Sat 4 37.472 462.44 1.1901 0.87973 Type:Cont:Sat 6 28.256 457.23 10.4064 0.10855 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > hnames <- lapply(housing[, -5], levels) # omit Freq > newData <- expand.grid(hnames) > newData$Sat <- ordered(newData$Sat) > house.pm <- predict(house.glm1, newData, + type = "response") # poisson means > house.pm <- matrix(house.pm, ncol = 3, byrow = TRUE, + dimnames = list(NULL, hnames[[1]])) > house.pr <- house.pm/drop(house.pm %*% rep(1, 3)) > cbind(expand.grid(hnames[-1]), round(house.pr, 2)) Infl Type Cont Low Medium High 1 Low Tower Low 0.40 0.26 0.34 2 Medium Tower Low 0.26 0.27 0.47 3 High Tower Low 0.15 0.19 0.66 4 Low Apartment Low 0.54 0.23 0.23 5 Medium Apartment Low 0.39 0.26 0.34 6 High Apartment Low 0.26 0.21 0.53 7 Low Atrium Low 0.43 0.32 0.25 8 Medium Atrium Low 0.30 0.35 0.36 9 High Atrium Low 0.19 0.27 0.54 10 Low Terrace Low 0.65 0.22 0.14 11 Medium Terrace Low 0.51 0.27 0.22 12 High Terrace Low 0.37 0.24 0.39 13 Low Tower High 0.30 0.28 0.42 14 Medium Tower High 0.18 0.27 0.54 15 High Tower High 0.10 0.19 0.71 16 Low Apartment High 0.44 0.27 0.30 17 Medium Apartment High 0.30 0.28 0.42 18 High Apartment High 0.18 0.21 0.61 19 Low Atrium High 0.33 0.36 0.31 20 Medium Atrium High 0.22 0.36 0.42 21 High Atrium High 0.13 0.27 0.60 22 Low Terrace High 0.55 0.27 0.19 23 Medium Terrace High 0.40 0.31 0.29 24 High Terrace High 0.27 0.26 0.47 > > # Iterative proportional scaling > loglm(Freq ~ Infl*Type*Cont + Sat*(Infl+Type+Cont), data = housing) Call: loglm(formula = Freq ~ Infl * Type * Cont + Sat * (Infl + Type + Cont), data = housing) Statistics: X^2 df P(> X^2) Likelihood Ratio 38.66222 34 0.2671359 Pearson 38.90831 34 0.2582333 > > > # multinomial model > library(nnet) > (house.mult<- multinom(Sat ~ Infl + Type + Cont, weights = Freq, + data = housing)) # weights: 24 (14 variable) initial value 1846.767257 iter 10 value 1747.045232 final value 1735.041933 converged Call: multinom(formula = Sat ~ Infl + Type + Cont, data = housing, weights = Freq) Coefficients: (Intercept) InflMedium 
InflHigh TypeApartment TypeAtrium TypeTerrace Medium -0.4192316 0.4464003 0.6649367 -0.4356851 0.1313663 -0.6665728 High -0.1387453 0.7348626 1.6126294 -0.7356261 -0.4079808 -1.4123333 ContHigh Medium 0.3608513 High 0.4818236 Residual Deviance: 3470.084 AIC: 3498.084 > house.mult2 <- multinom(Sat ~ Infl*Type*Cont, weights = Freq, + data = housing) # weights: 75 (48 variable) initial value 1846.767257 iter 10 value 1734.465581 iter 20 value 1717.220153 iter 30 value 1715.760679 iter 40 value 1715.713306 final value 1715.710836 converged > anova(house.mult, house.mult2) Likelihood ratio tests of Multinomial Models Response: Sat Model Resid. df Resid. Dev Test Df LR stat. Pr(Chi) 1 Infl + Type + Cont 130 3470.084 2 Infl * Type * Cont 96 3431.422 1 vs 2 34 38.66219 0.2671367 > > house.pm <- predict(house.mult, expand.grid(hnames[-1]), type = "probs") > cbind(expand.grid(hnames[-1]), round(house.pm, 2)) Infl Type Cont Low Medium High 1 Low Tower Low 0.40 0.26 0.34 2 Medium Tower Low 0.26 0.27 0.47 3 High Tower Low 0.15 0.19 0.66 4 Low Apartment Low 0.54 0.23 0.23 5 Medium Apartment Low 0.39 0.26 0.34 6 High Apartment Low 0.26 0.21 0.53 7 Low Atrium Low 0.43 0.32 0.25 8 Medium Atrium Low 0.30 0.35 0.36 9 High Atrium Low 0.19 0.27 0.54 10 Low Terrace Low 0.65 0.22 0.14 11 Medium Terrace Low 0.51 0.27 0.22 12 High Terrace Low 0.37 0.24 0.39 13 Low Tower High 0.30 0.28 0.42 14 Medium Tower High 0.18 0.27 0.54 15 High Tower High 0.10 0.19 0.71 16 Low Apartment High 0.44 0.27 0.30 17 Medium Apartment High 0.30 0.28 0.42 18 High Apartment High 0.18 0.21 0.61 19 Low Atrium High 0.33 0.36 0.31 20 Medium Atrium High 0.22 0.36 0.42 21 High Atrium High 0.13 0.27 0.60 22 Low Terrace High 0.55 0.27 0.19 23 Medium Terrace High 0.40 0.31 0.29 24 High Terrace High 0.27 0.26 0.47 > > # proportional odds model > house.cpr <- apply(house.pr, 1, cumsum) > logit <- function(x) log(x/(1-x)) > house.ld <- logit(house.cpr[2, ]) - logit(house.cpr[1, ]) > (ratio <- sort(drop(house.ld))) [1] 
0.9357341 0.9854433 1.0573182 1.0680491 1.0772649 1.0803574 1.0824895 [8] 1.0998759 1.1199975 1.1554228 1.1768138 1.1866427 1.2091541 1.2435026 [15] 1.2724096 1.2750171 1.2849903 1.3062598 1.3123988 1.3904715 1.4540087 [22] 1.4947753 1.4967585 1.6068789 > mean(ratio) [1] 1.223835 > > (house.plr <- polr(Sat ~ Infl + Type + Cont, + data = housing, weights = Freq)) Call: polr(formula = Sat ~ Infl + Type + Cont, data = housing, weights = Freq) Coefficients: InflMedium InflHigh TypeApartment TypeAtrium TypeTerrace 0.5663937 1.2888191 -0.5723501 -0.3661866 -1.0910149 ContHigh 0.3602841 Intercepts: Low|Medium Medium|High -0.4961353 0.6907083 Residual Deviance: 3479.149 AIC: 3495.149 > > house.pr1 <- predict(house.plr, expand.grid(hnames[-1]), type = "probs") > cbind(expand.grid(hnames[-1]), round(house.pr1, 2)) Infl Type Cont Low Medium High 1 Low Tower Low 0.38 0.29 0.33 2 Medium Tower Low 0.26 0.27 0.47 3 High Tower Low 0.14 0.21 0.65 4 Low Apartment Low 0.52 0.26 0.22 5 Medium Apartment Low 0.38 0.29 0.33 6 High Apartment Low 0.23 0.26 0.51 7 Low Atrium Low 0.47 0.27 0.26 8 Medium Atrium Low 0.33 0.29 0.38 9 High Atrium Low 0.19 0.25 0.56 10 Low Terrace Low 0.64 0.21 0.14 11 Medium Terrace Low 0.51 0.26 0.23 12 High Terrace Low 0.33 0.29 0.38 13 Low Tower High 0.30 0.28 0.42 14 Medium Tower High 0.19 0.25 0.56 15 High Tower High 0.10 0.17 0.72 16 Low Apartment High 0.43 0.28 0.29 17 Medium Apartment High 0.30 0.28 0.42 18 High Apartment High 0.17 0.23 0.60 19 Low Atrium High 0.38 0.29 0.33 20 Medium Atrium High 0.26 0.27 0.47 21 High Atrium High 0.14 0.21 0.64 22 Low Terrace High 0.56 0.25 0.19 23 Medium Terrace High 0.42 0.28 0.30 24 High Terrace High 0.26 0.27 0.47 > > Fr <- matrix(housing$Freq, ncol = 3, byrow = TRUE) > 2*sum(Fr*log(house.pr/house.pr1)) [1] 9.065433 > > house.plr2 <- stepAIC(house.plr, ~.^2) Start: AIC=3495.15 Sat ~ Infl + Type + Cont Df AIC + Infl:Type 6 3484.6 + Type:Cont 3 3492.5 3495.1 + Infl:Cont 2 3498.9 - Cont 1 3507.5 - Type 3 3545.1 - Infl 
2 3599.4 Step: AIC=3484.64 Sat ~ Infl + Type + Cont + Infl:Type Df AIC + Type:Cont 3 3482.7 3484.6 + Infl:Cont 2 3488.5 - Infl:Type 6 3495.1 - Cont 1 3497.8 Step: AIC=3482.69 Sat ~ Infl + Type + Cont + Infl:Type + Type:Cont Df AIC 3482.7 - Type:Cont 3 3484.6 + Infl:Cont 2 3486.6 - Infl:Type 6 3492.5 > house.plr2$anova Stepwise Model Path Analysis of Deviance Table Initial Model: Sat ~ Infl + Type + Cont Final Model: Sat ~ Infl + Type + Cont + Infl:Type + Type:Cont Step Df Deviance Resid. Df Resid. Dev AIC 1 1673 3479.149 3495.149 2 + Infl:Type 6 22.509347 1667 3456.640 3484.640 3 + Type:Cont 3 7.945029 1664 3448.695 3482.695 > > > > base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly")) > cleanEx() detaching ‘package:nnet’ > nameEx("huber") > ### * huber > > flush(stderr()); flush(stdout()) > > ### Name: huber > ### Title: Huber M-estimator of Location with MAD Scale > ### Aliases: huber > ### Keywords: robust > > ### ** Examples > > huber(chem) $mu [1] 3.206724 $s [1] 0.526323 > > > > cleanEx() > nameEx("hubers") > ### * hubers > > flush(stderr()); flush(stdout()) > > ### Name: hubers > ### Title: Huber Proposal 2 Robust Estimator of Location and/or Scale > ### Aliases: hubers > ### Keywords: robust > > ### ** Examples > > hubers(chem) $mu [1] 3.205498 $s [1] 0.673652 > hubers(chem, mu=3.68) $mu [1] 3.68 $s [1] 0.9409628 > > > > cleanEx() > nameEx("immer") > ### * immer > > flush(stderr()); flush(stdout()) > > ### Name: immer > ### Title: Yields from a Barley Field Trial > ### Aliases: immer > ### Keywords: datasets > > ### ** Examples > > immer.aov <- aov(cbind(Y1,Y2) ~ Loc + Var, data = immer) > summary(immer.aov) Response Y1 : Df Sum Sq Mean Sq F value Pr(>F) Loc 5 17829.8 3566.0 21.8923 1.751e-07 *** Var 4 2756.6 689.2 4.2309 0.01214 * Residuals 20 3257.7 162.9 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Response Y2 : Df Sum Sq Mean Sq F value Pr(>F) Loc 5 10285.0 2056.99 10.3901 5.049e-05 *** Var 4 2845.2 711.29 3.5928 0.02306 * Residuals 20 3959.5 197.98 --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > immer.aov <- aov((Y1+Y2)/2 ~ Var + Loc, data = immer) > summary(immer.aov) Df Sum Sq Mean Sq F value Pr(>F) Var 4 2655 663.7 5.989 0.00245 ** Loc 5 10610 2122.1 19.148 5.21e-07 *** Residuals 20 2217 110.8 --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > model.tables(immer.aov, type = "means", se = TRUE, cterms = "Var") Tables of means Grand mean 101.09 Var Var M P S T V 94.39 102.54 91.13 118.20 99.18 Standard errors for differences of means Var 6.078 replic. 6 > > > > cleanEx() > nameEx("isoMDS") > ### * isoMDS > > flush(stderr()); flush(stdout()) > > ### Name: isoMDS > ### Title: Kruskal's Non-metric Multidimensional Scaling > ### Aliases: isoMDS Shepard > ### Keywords: multivariate > > ### ** Examples > > swiss.x <- as.matrix(swiss[, -1]) > swiss.dist <- dist(swiss.x) > swiss.mds <- isoMDS(swiss.dist) initial value 2.979731 iter 5 value 2.431486 iter 10 value 2.343353 final value 2.338839 converged > plot(swiss.mds$points, type = "n") > text(swiss.mds$points, labels = as.character(1:nrow(swiss.x))) > swiss.sh <- Shepard(swiss.dist, swiss.mds$points) > plot(swiss.sh, pch = ".") > lines(swiss.sh$x, swiss.sh$yf, type = "S") > > > > cleanEx() > nameEx("kde2d") > ### * kde2d > > flush(stderr()); flush(stdout()) > > ### Name: kde2d > ### Title: Two-Dimensional Kernel Density Estimation > ### Aliases: kde2d > ### Keywords: dplot > > ### ** Examples > > attach(geyser) > plot(duration, waiting, xlim = c(0.5,6), ylim = c(40,100)) > f1 <- kde2d(duration, waiting, n = 50, lims = c(0.5, 6, 40, 100)) > image(f1, zlim = c(0, 0.05)) > f2 <- kde2d(duration, waiting, n = 50, lims = c(0.5, 6, 40, 100), + h = c(width.SJ(duration), width.SJ(waiting)) ) > image(f2, zlim = c(0, 0.05)) > 
persp(f2, phi = 30, theta = 20, d = 5) > > plot(duration[-272], duration[-1], xlim = c(0.5, 6), + ylim = c(1, 6),xlab = "previous duration", ylab = "duration") > f1 <- kde2d(duration[-272], duration[-1], + h = rep(1.5, 2), n = 50, lims = c(0.5, 6, 0.5, 6)) > contour(f1, xlab = "previous duration", + ylab = "duration", levels = c(0.05, 0.1, 0.2, 0.4) ) > f1 <- kde2d(duration[-272], duration[-1], + h = rep(0.6, 2), n = 50, lims = c(0.5, 6, 0.5, 6)) > contour(f1, xlab = "previous duration", + ylab = "duration", levels = c(0.05, 0.1, 0.2, 0.4) ) > f1 <- kde2d(duration[-272], duration[-1], + h = rep(0.4, 2), n = 50, lims = c(0.5, 6, 0.5, 6)) > contour(f1, xlab = "previous duration", + ylab = "duration", levels = c(0.05, 0.1, 0.2, 0.4) ) > detach("geyser") > > > > cleanEx() > nameEx("lda") > ### * lda > > flush(stderr()); flush(stdout()) > > ### Name: lda > ### Title: Linear Discriminant Analysis > ### Aliases: lda lda.default lda.data.frame lda.formula lda.matrix > ### model.frame.lda print.lda coef.lda > ### Keywords: multivariate > > ### ** Examples > > Iris <- data.frame(rbind(iris3[,,1], iris3[,,2], iris3[,,3]), + Sp = rep(c("s","c","v"), rep(50,3))) > train <- sample(1:150, 75) > table(Iris$Sp[train]) c s v 20 28 27 > ## your answer may differ > ## c s v > ## 22 23 30 > z <- lda(Sp ~ ., Iris, prior = c(1,1,1)/3, subset = train) > predict(z, Iris[-train, ])$class [1] s s s s s s s s s s s s s s s s s s s s s s c c c c c c c c c c c c c c c v [39] c c c c c c c c c c c c c c v v v v v v v v v v v v v v c v v v v v v v v Levels: c s v > ## [1] s s s s s s s s s s s s s s s s s s s s s s s s s s s c c c > ## [31] c c c c c c c v c c c c v c c c c c c c c c c c c v v v v v > ## [61] v v v v v v v v v v v v v v v > (z1 <- update(z, . ~ . - Petal.W.)) Call: lda(Sp ~ Sepal.L. + Sepal.W. + Petal.L., data = Iris, prior = c(1, 1, 1)/3, subset = train) Prior probabilities of groups: c s v 0.3333333 0.3333333 0.3333333 Group means: Sepal.L. Sepal.W. Petal.L. 
c 5.975000 2.810000 4.395000 s 4.978571 3.432143 1.460714 v 6.748148 2.988889 5.637037 Coefficients of linear discriminants: LD1 LD2 Sepal.L. 1.1643015 0.68235619 Sepal.W. 0.7945307 2.23093702 Petal.L. -3.0421425 0.01236265 Proportion of trace: LD1 LD2 0.9929 0.0071 > > > > cleanEx() > nameEx("leuk") > ### * leuk > > flush(stderr()); flush(stdout()) > > ### Name: leuk > ### Title: Survival Times and White Blood Counts for Leukaemia Patients > ### Aliases: leuk > ### Keywords: datasets > > ### ** Examples > > library(survival) > plot(survfit(Surv(time) ~ ag, data = leuk), lty = 2:3, col = 2:3) > > # now Cox models > leuk.cox <- coxph(Surv(time) ~ ag + log(wbc), leuk) > summary(leuk.cox) Call: coxph(formula = Surv(time) ~ ag + log(wbc), data = leuk) n= 33, number of events= 33 coef exp(coef) se(coef) z Pr(>|z|) agpresent -1.0691 0.3433 0.4293 -2.490 0.01276 * log(wbc) 0.3677 1.4444 0.1360 2.703 0.00687 ** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 exp(coef) exp(-coef) lower .95 upper .95 agpresent 0.3433 2.9126 0.148 0.7964 log(wbc) 1.4444 0.6923 1.106 1.8857 Concordance= 0.726 (se = 0.047 ) Likelihood ratio test= 15.64 on 2 df, p=4e-04 Wald test = 15.06 on 2 df, p=5e-04 Score (logrank) test = 16.49 on 2 df, p=3e-04 > > > > cleanEx() detaching ‘package:survival’ > nameEx("lm.ridge") > ### * lm.ridge > > flush(stderr()); flush(stdout()) > > ### Name: lm.ridge > ### Title: Ridge Regression > ### Aliases: lm.ridge plot.ridgelm print.ridgelm select select.ridgelm > ### Keywords: models > > ### ** Examples > > longley # not the same as the S-PLUS dataset GNP.deflator GNP Unemployed Armed.Forces Population Year Employed 1947 83.0 234.289 235.6 159.0 107.608 1947 60.323 1948 88.5 259.426 232.5 145.6 108.632 1948 61.122 1949 88.2 258.054 368.2 161.6 109.773 1949 60.171 1950 89.5 284.599 335.1 165.0 110.929 1950 61.187 1951 96.2 328.975 209.9 309.9 112.075 1951 63.221 1952 98.1 346.999 193.2 359.4 113.270 1952 63.639 1953 99.0 365.385 187.0 354.7 
115.094 1953 64.989 1954 100.0 363.112 357.8 335.0 116.219 1954 63.761 1955 101.2 397.469 290.4 304.8 117.388 1955 66.019 1956 104.6 419.180 282.2 285.7 118.734 1956 67.857 1957 108.4 442.769 293.6 279.8 120.445 1957 68.169 1958 110.8 444.546 468.1 263.7 121.950 1958 66.513 1959 112.6 482.704 381.3 255.2 123.366 1959 68.655 1960 114.2 502.601 393.1 251.4 125.368 1960 69.564 1961 115.7 518.173 480.6 257.2 127.852 1961 69.331 1962 116.9 554.894 400.7 282.7 130.081 1962 70.551 > names(longley)[1] <- "y" > lm.ridge(y ~ ., longley) GNP Unemployed Armed.Forces Population 2946.85636017 0.26352725 0.03648291 0.01116105 -1.73702984 Year Employed -1.41879853 0.23128785 > plot(lm.ridge(y ~ ., longley, + lambda = seq(0,0.1,0.001))) > select(lm.ridge(y ~ ., longley, + lambda = seq(0,0.1,0.0001))) modified HKB estimator is 0.006836982 modified L-W estimator is 0.05267247 smallest value of GCV at 0.0057 > > > > cleanEx() > nameEx("loglm") > ### * loglm > > flush(stderr()); flush(stdout()) > > ### Name: loglm > ### Title: Fit Log-Linear Models by Iterative Proportional Scaling > ### Aliases: loglm > ### Keywords: category models > > ### ** Examples > > # The data frames Cars93, minn38 and quine are available > # in the MASS package. > > # Case 1: frequencies specified as an array. > sapply(minn38, function(x) length(levels(x))) hs phs fol sex f 3 4 7 2 0 > ## hs phs fol sex f > ## 3 4 7 2 0 > ##minn38a <- array(0, c(3,4,7,2), lapply(minn38[, -5], levels)) > ##minn38a[data.matrix(minn38[,-5])] <- minn38$f > > ## or more simply > minn38a <- xtabs(f ~ ., minn38) > > fm <- loglm(~ 1 + 2 + 3 + 4, minn38a) # numerals as names. > deviance(fm) [1] 3711.914 > ## [1] 3711.9 > fm1 <- update(fm, .~.^2) > fm2 <- update(fm, .~.^3, print = TRUE) 5 iterations: deviation 0.07512432 > ## 5 iterations: deviation 0.075 > anova(fm, fm1, fm2) LR tests for hierarchical log-linear models Model 1: ~1 + 2 + 3 + 4 Model 2: . 
~ `1` + `2` + `3` + `4` + `1`:`2` + `1`:`3` + `1`:`4` + `2`:`3` + `2`:`4` + `3`:`4` Model 3: . ~ `1` + `2` + `3` + `4` + `1`:`2` + `1`:`3` + `1`:`4` + `2`:`3` + `2`:`4` + `3`:`4` + `1`:`2`:`3` + `1`:`2`:`4` + `1`:`3`:`4` + `2`:`3`:`4` Deviance df Delta(Dev) Delta(df) P(> Delta(Dev) Model 1 3711.91367 155 Model 2 220.04285 108 3491.87082 47 0.00000 Model 3 47.74492 36 172.29794 72 0.00000 Saturated 0.00000 0 47.74492 36 0.09114 > > # Case 1. An array generated with xtabs. > > loglm(~ Type + Origin, xtabs(~ Type + Origin, Cars93)) Call: loglm(formula = ~Type + Origin, data = xtabs(~Type + Origin, Cars93)) Statistics: X^2 df P(> X^2) Likelihood Ratio 18.36179 5 0.00252554 Pearson 14.07985 5 0.01511005 > > # Case 2. Frequencies given as a vector in a data frame > names(quine) [1] "Eth" "Sex" "Age" "Lrn" "Days" > ## [1] "Eth" "Sex" "Age" "Lrn" "Days" > fm <- loglm(Days ~ .^2, quine) > gm <- glm(Days ~ .^2, poisson, quine) # check glm. > c(deviance(fm), deviance(gm)) # deviances agree [1] 1368.669 1368.669 > ## [1] 1368.7 1368.7 > c(fm$df, gm$df) # resid df do not! [1] 127 > c(fm$df, gm$df.residual) # resid df do not! [1] 127 128 > ## [1] 127 128 > # The loglm residual degrees of freedom is wrong because of > # a non-detectable redundancy in the model matrix. 
> > > > cleanEx() > nameEx("logtrans") > ### * logtrans > > flush(stderr()); flush(stdout()) > > ### Name: logtrans > ### Title: Estimate log Transformation Parameter > ### Aliases: logtrans logtrans.formula logtrans.lm logtrans.default > ### Keywords: regression models hplot > > ### ** Examples > > logtrans(Days ~ Age*Sex*Eth*Lrn, data = quine, + alpha = seq(0.75, 6.5, len=20)) > > > > cleanEx() > nameEx("lqs") > ### * lqs > > flush(stderr()); flush(stdout()) > > ### Name: lqs > ### Title: Resistant Regression > ### Aliases: lqs lqs.formula lqs.default lmsreg ltsreg > ### Keywords: models robust > > ### ** Examples > > ## IGNORE_RDIFF_BEGIN > set.seed(123) # make reproducible > lqs(stack.loss ~ ., data = stackloss) Call: lqs.formula(formula = stack.loss ~ ., data = stackloss) Coefficients: (Intercept) Air.Flow Water.Temp Acid.Conc. -3.631e+01 7.292e-01 4.167e-01 -8.131e-17 Scale estimates 0.9149 1.0148 > lqs(stack.loss ~ ., data = stackloss, method = "S", nsamp = "exact") Call: lqs.formula(formula = stack.loss ~ ., data = stackloss, nsamp = "exact", method = "S") Coefficients: (Intercept) Air.Flow Water.Temp Acid.Conc. 
-35.37611 0.82522 0.44248 -0.07965 Scale estimates 1.912 > ## IGNORE_RDIFF_END > > > > cleanEx() > nameEx("mca") > ### * mca > > flush(stderr()); flush(stdout()) > > ### Name: mca > ### Title: Multiple Correspondence Analysis > ### Aliases: mca print.mca > ### Keywords: category multivariate > > ### ** Examples > > farms.mca <- mca(farms, abbrev=TRUE) > farms.mca Call: mca(df = farms, abbrev = TRUE) Multiple correspondence analysis of 20 cases of 4 factors Correlations 0.806 0.745 cumulative % explained 26.87 51.71 > plot(farms.mca) > > > > cleanEx() > nameEx("menarche") > ### * menarche > > flush(stderr()); flush(stdout()) > > ### Name: menarche > ### Title: Age of Menarche in Warsaw > ### Aliases: menarche > ### Keywords: datasets > > ### ** Examples > > mprob <- glm(cbind(Menarche, Total - Menarche) ~ Age, + binomial(link = probit), data = menarche) > > > > cleanEx() > nameEx("motors") > ### * motors > > flush(stderr()); flush(stdout()) > > ### Name: motors > ### Title: Accelerated Life Testing of Motorettes > ### Aliases: motors > ### Keywords: datasets > > ### ** Examples > > library(survival) > plot(survfit(Surv(time, cens) ~ factor(temp), motors), conf.int = FALSE) > # fit Weibull model > motor.wei <- survreg(Surv(time, cens) ~ temp, motors) > summary(motor.wei) Call: survreg(formula = Surv(time, cens) ~ temp, data = motors) Value Std. 
Error z p (Intercept) 16.31852 0.62296 26.2 < 2e-16 temp -0.04531 0.00319 -14.2 < 2e-16 Log(scale) -1.09564 0.21480 -5.1 3.4e-07 Scale= 0.334 Weibull distribution Loglik(model)= -147.4 Loglik(intercept only)= -169.5 Chisq= 44.32 on 1 degrees of freedom, p= 2.8e-11 Number of Newton-Raphson Iterations: 7 n= 40 > # and predict at 130C > unlist(predict(motor.wei, data.frame(temp=130), se.fit = TRUE)) fit.1 se.fit.1 33813.06 7506.36 > > motor.cox <- coxph(Surv(time, cens) ~ temp, motors) > summary(motor.cox) Call: coxph(formula = Surv(time, cens) ~ temp, data = motors) n= 40, number of events= 17 coef exp(coef) se(coef) z Pr(>|z|) temp 0.09185 1.09620 0.02736 3.358 0.000786 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 exp(coef) exp(-coef) lower .95 upper .95 temp 1.096 0.9122 1.039 1.157 Concordance= 0.84 (se = 0.035 ) Likelihood ratio test= 25.56 on 1 df, p=4e-07 Wald test = 11.27 on 1 df, p=8e-04 Score (logrank) test = 22.73 on 1 df, p=2e-06 > # predict at temperature 200 > plot(survfit(motor.cox, newdata = data.frame(temp=200), + conf.type = "log-log")) > summary( survfit(motor.cox, newdata = data.frame(temp=130)) ) Call: survfit(formula = motor.cox, newdata = data.frame(temp = 130)) time n.risk n.event survival std.err lower 95% CI upper 95% CI 408 40 4 1.000 0.000254 0.999 1 504 36 3 1.000 0.000498 0.999 1 1344 28 2 0.999 0.001910 0.995 1 1440 26 1 0.998 0.002697 0.993 1 1764 20 1 0.996 0.005325 0.986 1 2772 19 1 0.994 0.007920 0.978 1 3444 18 1 0.991 0.010673 0.971 1 3542 17 1 0.988 0.013667 0.962 1 3780 16 1 0.985 0.016976 0.952 1 4860 15 1 0.981 0.020692 0.941 1 5196 14 1 0.977 0.024941 0.929 1 > > > > cleanEx() detaching ‘package:survival’ > nameEx("muscle") > ### * muscle > > flush(stderr()); flush(stdout()) > > ### Name: muscle > ### Title: Effect of Calcium Chloride on Muscle Contraction in Rat Hearts > ### Aliases: muscle > ### Keywords: datasets > > ### ** Examples > > ## IGNORE_RDIFF_BEGIN > A <- model.matrix(~ Strip - 1, 
data=muscle) > rats.nls1 <- nls(log(Length) ~ cbind(A, rho^Conc), + data = muscle, start = c(rho=0.1), algorithm="plinear") > (B <- coef(rats.nls1)) rho .lin.StripS01 .lin.StripS02 .lin.StripS03 .lin.StripS04 0.07776401 3.08304824 3.30137838 3.44562531 2.80464434 .lin.StripS05 .lin.StripS06 .lin.StripS07 .lin.StripS08 .lin.StripS09 2.60835015 3.03357725 3.52301734 3.38711844 3.46709396 .lin.StripS10 .lin.StripS11 .lin.StripS12 .lin.StripS13 .lin.StripS14 3.81438456 3.73878664 3.51332581 3.39741115 3.47088608 .lin.StripS15 .lin.StripS16 .lin.StripS17 .lin.StripS18 .lin.StripS19 3.72895847 3.31863862 3.37938673 2.96452195 3.58468686 .lin.StripS20 .lin.StripS21 .lin22 3.39628029 3.36998872 -2.96015460 > > st <- list(alpha = B[2:22], beta = B[23], rho = B[1]) > (rats.nls2 <- nls(log(Length) ~ alpha[Strip] + beta*rho^Conc, + data = muscle, start = st)) Nonlinear regression model model: log(Length) ~ alpha[Strip] + beta * rho^Conc data: muscle alpha..lin.StripS01 alpha..lin.StripS02 alpha..lin.StripS03 alpha..lin.StripS04 3.08305 3.30138 3.44563 2.80464 alpha..lin.StripS05 alpha..lin.StripS06 alpha..lin.StripS07 alpha..lin.StripS08 2.60835 3.03358 3.52302 3.38712 alpha..lin.StripS09 alpha..lin.StripS10 alpha..lin.StripS11 alpha..lin.StripS12 3.46709 3.81438 3.73879 3.51333 alpha..lin.StripS13 alpha..lin.StripS14 alpha..lin.StripS15 alpha..lin.StripS16 3.39741 3.47089 3.72896 3.31864 alpha..lin.StripS17 alpha..lin.StripS18 alpha..lin.StripS19 alpha..lin.StripS20 3.37939 2.96452 3.58469 3.39628 alpha..lin.StripS21 beta..lin22 rho.rho 3.36999 -2.96015 0.07776 residual sum-of-squares: 1.045 Number of iterations to convergence: 0 Achieved convergence tolerance: 4.923e-06 > ## IGNORE_RDIFF_END > > Muscle <- with(muscle, { + Muscle <- expand.grid(Conc = sort(unique(Conc)), Strip = levels(Strip)) + Muscle$Yhat <- predict(rats.nls2, Muscle) + Muscle <- cbind(Muscle, logLength = rep(as.numeric(NA), 126)) + ind <- match(paste(Strip, Conc), + paste(Muscle$Strip, Muscle$Conc)) + 
Muscle$logLength[ind] <- log(Length) + Muscle}) > > lattice::xyplot(Yhat ~ Conc | Strip, Muscle, as.table = TRUE, + ylim = range(c(Muscle$Yhat, Muscle$logLength), na.rm = TRUE), + subscripts = TRUE, xlab = "Calcium Chloride concentration (mM)", + ylab = "log(Length in mm)", panel = + function(x, y, subscripts, ...) { + panel.xyplot(x, Muscle$logLength[subscripts], ...) + llines(spline(x, y)) + }) > > > > cleanEx() > nameEx("mvrnorm") > ### * mvrnorm > > flush(stderr()); flush(stdout()) > > ### Name: mvrnorm > ### Title: Simulate from a Multivariate Normal Distribution > ### Aliases: mvrnorm > ### Keywords: distribution multivariate > > ### ** Examples > > Sigma <- matrix(c(10,3,3,2),2,2) > Sigma [,1] [,2] [1,] 10 3 [2,] 3 2 > var(mvrnorm(n = 1000, rep(0, 2), Sigma)) [,1] [,2] [1,] 10.697849 3.228279 [2,] 3.228279 2.165271 > var(mvrnorm(n = 1000, rep(0, 2), Sigma, empirical = TRUE)) [,1] [,2] [1,] 10 3 [2,] 3 2 > > > > cleanEx() > nameEx("negative.binomial") > ### * negative.binomial > > flush(stderr()); flush(stdout()) > > ### Name: negative.binomial > ### Title: Family function for Negative Binomial GLMs > ### Aliases: negative.binomial > ### Keywords: regression models > > ### ** Examples > > # Fitting a Negative Binomial model to the quine data > # with theta = 2 assumed known. 
> # > glm(Days ~ .^4, family = negative.binomial(2), data = quine) Call: glm(formula = Days ~ .^4, family = negative.binomial(2), data = quine) Coefficients: (Intercept) EthN SexM 3.0564 -0.1386 -0.4914 AgeF1 AgeF2 AgeF3 -0.6227 -2.3632 -0.3784 LrnSL EthN:SexM EthN:AgeF1 -1.9577 -0.7524 0.1029 EthN:AgeF2 EthN:AgeF3 EthN:LrnSL -0.5546 0.0633 2.2588 SexM:AgeF1 SexM:AgeF2 SexM:AgeF3 0.4092 3.1098 1.1145 SexM:LrnSL AgeF1:LrnSL AgeF2:LrnSL 1.5900 2.6421 4.8585 AgeF3:LrnSL EthN:SexM:AgeF1 EthN:SexM:AgeF2 NA -0.3105 0.3469 EthN:SexM:AgeF3 EthN:SexM:LrnSL EthN:AgeF1:LrnSL 0.8329 -0.1639 -3.5493 EthN:AgeF2:LrnSL EthN:AgeF3:LrnSL SexM:AgeF1:LrnSL -3.3315 NA -2.4285 SexM:AgeF2:LrnSL SexM:AgeF3:LrnSL EthN:SexM:AgeF1:LrnSL -4.1914 NA 2.1711 EthN:SexM:AgeF2:LrnSL EthN:SexM:AgeF3:LrnSL 2.1029 NA Degrees of Freedom: 145 Total (i.e. Null); 118 Residual Null Deviance: 280.2 Residual Deviance: 172 AIC: 1095 > > > > cleanEx() > nameEx("nlschools") > ### * nlschools > > flush(stderr()); flush(stdout()) > > ### Name: nlschools > ### Title: Eighth-Grade Pupils in the Netherlands > ### Aliases: nlschools > ### Keywords: datasets > > ### ** Examples > > ## Don't show: > op <- options(digits=5) > ## End(Don't show) > nl1 <- within(nlschools, { + IQave <- tapply(IQ, class, mean)[as.character(class)] + IQ <- IQ - IQave + }) > cen <- c("IQ", "IQave", "SES") > nl1[cen] <- scale(nl1[cen], center = TRUE, scale = FALSE) > > nl.lme <- nlme::lme(lang ~ IQ*COMB + IQave + SES, + random = ~ IQ | class, data = nl1) > ## IGNORE_RDIFF_BEGIN > summary(nl.lme) Linear mixed-effects model fit by REML Data: nl1 AIC BIC logLik 15120 15178 -7550.2 Random effects: Formula: ~IQ | class Structure: General positive-definite, Log-Cholesky parametrization StdDev Corr (Intercept) 2.78707 (Intr) IQ 0.48424 -0.516 Residual 6.24839 Fixed effects: lang ~ IQ * COMB + IQave + SES Value Std.Error DF t-value p-value (Intercept) 41.370 0.35364 2151 116.985 0.0000 IQ 2.124 0.10070 2151 21.088 0.0000 COMB1 -1.672 0.58719 130 
-2.847 0.0051 IQave 3.248 0.30021 130 10.818 0.0000 SES 0.157 0.01465 2151 10.697 0.0000 IQ:COMB1 0.431 0.18594 2151 2.317 0.0206 Correlation: (Intr) IQ COMB1 IQave SES IQ -0.257 COMB1 -0.609 0.155 IQave -0.049 0.041 0.171 SES 0.010 -0.190 -0.001 -0.168 IQ:COMB1 0.139 -0.522 -0.206 -0.016 -0.003 Standardized Within-Group Residuals: Min Q1 Med Q3 Max -4.059387 -0.631084 0.065519 0.717864 2.794540 Number of Observations: 2287 Number of Groups: 133 > ## IGNORE_RDIFF_END > ## Don't show: > options(op) > ## End(Don't show) > > > > cleanEx() > nameEx("npk") > ### * npk > > flush(stderr()); flush(stdout()) > > ### Name: npk > ### Title: Classical N, P, K Factorial Experiment > ### Aliases: npk > ### Keywords: datasets > > ### ** Examples > > options(contrasts = c("contr.sum", "contr.poly")) > npk.aov <- aov(yield ~ block + N*P*K, npk) > npk.aov Call: aov(formula = yield ~ block + N * P * K, data = npk) Terms: block N P K N:P N:K P:K Sum of Squares 343.2950 189.2817 8.4017 95.2017 21.2817 33.1350 0.4817 Deg. of Freedom 5 1 1 1 1 1 1 Residuals Sum of Squares 185.2867 Deg. of Freedom 12 Residual standard error: 3.929447 1 out of 13 effects not estimable Estimated effects may be unbalanced > summary(npk.aov) Df Sum Sq Mean Sq F value Pr(>F) block 5 343.3 68.66 4.447 0.01594 * N 1 189.3 189.28 12.259 0.00437 ** P 1 8.4 8.40 0.544 0.47490 K 1 95.2 95.20 6.166 0.02880 * N:P 1 21.3 21.28 1.378 0.26317 N:K 1 33.1 33.14 2.146 0.16865 P:K 1 0.5 0.48 0.031 0.86275 Residuals 12 185.3 15.44 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > alias(npk.aov) Model : yield ~ block + N * P * K Complete : (Intercept) block1 block2 block3 block4 block5 N1 P1 K1 N1:P1 N1:K1 N1:P1:K1 0 1 -1 -1 -1 1 0 0 0 0 0 P1:K1 N1:P1:K1 0 > coef(npk.aov) (Intercept) block1 block2 block3 block4 block5 54.8750000 -0.8500000 2.5750000 5.9000000 -4.7500000 -4.3500000 N1 P1 K1 N1:P1 N1:K1 P1:K1 -2.8083333 0.5916667 1.9916667 -0.9416667 -1.1750000 0.1416667 > options(contrasts = c("contr.treatment", "contr.poly")) > npk.aov1 <- aov(yield ~ block + N + K, data = npk) > summary.lm(npk.aov1) Call: aov(formula = yield ~ block + N + K, data = npk) Residuals: Min 1Q Median 3Q Max -6.4083 -2.1438 0.2042 2.3292 7.0750 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 53.208 2.276 23.381 8.5e-14 *** block2 3.425 2.787 1.229 0.23690 block3 6.750 2.787 2.422 0.02769 * block4 -3.900 2.787 -1.399 0.18082 block5 -3.500 2.787 -1.256 0.22723 block6 2.325 2.787 0.834 0.41646 N1 5.617 1.609 3.490 0.00302 ** K1 -3.983 1.609 -2.475 0.02487 * --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Residual standard error: 3.942 on 16 degrees of freedom Multiple R-squared: 0.7163, Adjusted R-squared: 0.5922 F-statistic: 5.772 on 7 and 16 DF, p-value: 0.001805 > se.contrast(npk.aov1, list(N=="0", N=="1"), data = npk) [1] 1.609175 > ## IGNORE_RDIFF_BEGIN > model.tables(npk.aov1, type = "means", se = TRUE) Tables of means Grand mean 54.875 block block 1 2 3 4 5 6 54.03 57.45 60.77 50.12 50.52 56.35 N N 0 1 52.07 57.68 K K 0 1 56.87 52.88 Standard errors for differences of means block N K 2.787 1.609 1.609 replic. 
4 12 12 > ## IGNORE_RDIFF_END > > > base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly")) > cleanEx() > nameEx("oats") > ### * oats > > flush(stderr()); flush(stdout()) > > ### Name: oats > ### Title: Data from an Oats Field Trial > ### Aliases: oats > ### Keywords: datasets > > ### ** Examples > > oats$Nf <- ordered(oats$N, levels = sort(levels(oats$N))) > oats.aov <- aov(Y ~ Nf*V + Error(B/V), data = oats, qr = TRUE) > ## IGNORE_RDIFF_BEGIN > summary(oats.aov) Error: B Df Sum Sq Mean Sq F value Pr(>F) Residuals 5 15875 3175 Error: B:V Df Sum Sq Mean Sq F value Pr(>F) V 2 1786 893.2 1.485 0.272 Residuals 10 6013 601.3 Error: Within Df Sum Sq Mean Sq F value Pr(>F) Nf 3 20020 6673 37.686 2.46e-12 *** Nf:V 6 322 54 0.303 0.932 Residuals 45 7969 177 --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > summary(oats.aov, split = list(Nf=list(L=1, Dev=2:3))) Error: B Df Sum Sq Mean Sq F value Pr(>F) Residuals 5 15875 3175 Error: B:V Df Sum Sq Mean Sq F value Pr(>F) V 2 1786 893.2 1.485 0.272 Residuals 10 6013 601.3 Error: Within Df Sum Sq Mean Sq F value Pr(>F) Nf 3 20020 6673 37.686 2.46e-12 *** Nf: L 1 19536 19536 110.323 1.09e-13 *** Nf: Dev 2 484 242 1.367 0.265 Nf:V 6 322 54 0.303 0.932 Nf:V: L 2 168 84 0.475 0.625 Nf:V: Dev 4 153 38 0.217 0.928 Residuals 45 7969 177 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > ## IGNORE_RDIFF_END > par(mfrow = c(1,2), pty = "s") > plot(fitted(oats.aov[[4]]), studres(oats.aov[[4]])) > abline(h = 0, lty = 2) > oats.pr <- proj(oats.aov) > qqnorm(oats.pr[[4]][,"Residuals"], ylab = "Stratum 4 residuals") > qqline(oats.pr[[4]][,"Residuals"]) > > par(mfrow = c(1,1), pty = "m") > oats.aov2 <- aov(Y ~ N + V + Error(B/V), data = oats, qr = TRUE) > model.tables(oats.aov2, type = "means", se = TRUE) Warning in model.tables.aovlist(oats.aov2, type = "means", se = TRUE) : SEs for type 'means' are not yet implemented Tables of means Grand mean 103.9722 N N 0.0cwt 0.2cwt 0.4cwt 0.6cwt 79.39 98.89 114.22 123.39 V V Golden.rain Marvellous Victory 104.50 109.79 97.63 > > > > graphics::par(get("par.postscript", pos = 'CheckExEnv')) > cleanEx() > nameEx("parcoord") > ### * parcoord > > flush(stderr()); flush(stdout()) > > ### Name: parcoord > ### Title: Parallel Coordinates Plot > ### Aliases: parcoord > ### Keywords: hplot > > ### ** Examples > > parcoord(state.x77[, c(7, 4, 6, 2, 5, 3)]) > > ir <- rbind(iris3[,,1], iris3[,,2], iris3[,,3]) > parcoord(log(ir)[, c(3, 4, 2, 1)], col = 1 + (0:149)%/%50) > > > > cleanEx() > nameEx("petrol") > ### * petrol > > flush(stderr()); flush(stdout()) > > ### Name: petrol > ### Title: N. L. 
Prater's Petrol Refinery Data > ### Aliases: petrol > ### Keywords: datasets > > ### ** Examples > > library(nlme) > Petrol <- petrol > Petrol[, 2:5] <- scale(as.matrix(Petrol[, 2:5]), scale = FALSE) > pet3.lme <- lme(Y ~ SG + VP + V10 + EP, + random = ~ 1 | No, data = Petrol) > pet3.lme <- update(pet3.lme, method = "ML") > pet4.lme <- update(pet3.lme, fixed = Y ~ V10 + EP) > anova(pet4.lme, pet3.lme) Model df AIC BIC logLik Test L.Ratio p-value pet4.lme 1 5 149.6119 156.9406 -69.80594 pet3.lme 2 7 149.3833 159.6435 -67.69166 1 vs 2 4.22855 0.1207 > > > > cleanEx() detaching ‘package:nlme’ > nameEx("plot.mca") > ### * plot.mca > > flush(stderr()); flush(stdout()) > > ### Name: plot.mca > ### Title: Plot Method for Objects of Class 'mca' > ### Aliases: plot.mca > ### Keywords: hplot multivariate > > ### ** Examples > > plot(mca(farms, abbrev = TRUE)) > > > > cleanEx() > nameEx("plot.profile") > ### * plot.profile > > flush(stderr()); flush(stdout()) > > ### Name: plot.profile > ### Title: Plotting Functions for 'profile' Objects > ### Aliases: plot.profile pairs.profile > ### Keywords: models hplot > > ### ** Examples > > ## see ?profile.glm for an example using glm fits. 
> > ## a version of example(profile.nls) from R >= 2.8.0 > fm1 <- nls(demand ~ SSasympOrig(Time, A, lrc), data = BOD) > pr1 <- profile(fm1, alpha = 0.1) > MASS:::plot.profile(pr1) > pairs(pr1) # a little odd since the parameters are highly correlated > > ## an example from ?nls > x <- -(1:100)/10 > y <- 100 + 10 * exp(x / 2) + rnorm(x)/10 > nlmod <- nls(y ~ Const + A * exp(B * x), start=list(Const=100, A=10, B=1)) > pairs(profile(nlmod)) > > > > cleanEx() > nameEx("polr") > ### * polr > > flush(stderr()); flush(stdout()) > > ### Name: polr > ### Title: Ordered Logistic or Probit Regression > ### Aliases: polr > ### Keywords: models > > ### ** Examples > > options(contrasts = c("contr.treatment", "contr.poly")) > house.plr <- polr(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) > house.plr Call: polr(formula = Sat ~ Infl + Type + Cont, data = housing, weights = Freq) Coefficients: InflMedium InflHigh TypeApartment TypeAtrium TypeTerrace 0.5663937 1.2888191 -0.5723501 -0.3661866 -1.0910149 ContHigh 0.3602841 Intercepts: Low|Medium Medium|High -0.4961353 0.6907083 Residual Deviance: 3479.149 AIC: 3495.149 > summary(house.plr, digits = 3) Re-fitting to get Hessian Call: polr(formula = Sat ~ Infl + Type + Cont, data = housing, weights = Freq) Coefficients: Value Std. Error t value InflMedium 0.566 0.1047 5.41 InflHigh 1.289 0.1272 10.14 TypeApartment -0.572 0.1192 -4.80 TypeAtrium -0.366 0.1552 -2.36 TypeTerrace -1.091 0.1515 -7.20 ContHigh 0.360 0.0955 3.77 Intercepts: Value Std. Error t value Low|Medium -0.496 0.125 -3.974 Medium|High 0.691 0.125 5.505 Residual Deviance: 3479.149 AIC: 3495.149 > ## slightly worse fit from > summary(update(house.plr, method = "probit", Hess = TRUE), digits = 3) Call: polr(formula = Sat ~ Infl + Type + Cont, data = housing, weights = Freq, Hess = TRUE, method = "probit") Coefficients: Value Std. 
Error t value InflMedium 0.346 0.0641 5.40 InflHigh 0.783 0.0764 10.24 TypeApartment -0.348 0.0723 -4.81 TypeAtrium -0.218 0.0948 -2.30 TypeTerrace -0.664 0.0918 -7.24 ContHigh 0.222 0.0581 3.83 Intercepts: Value Std. Error t value Low|Medium -0.300 0.076 -3.937 Medium|High 0.427 0.076 5.585 Residual Deviance: 3479.689 AIC: 3495.689 > ## although it is not really appropriate, can fit > summary(update(house.plr, method = "loglog", Hess = TRUE), digits = 3) Call: polr(formula = Sat ~ Infl + Type + Cont, data = housing, weights = Freq, Hess = TRUE, method = "loglog") Coefficients: Value Std. Error t value InflMedium 0.367 0.0727 5.05 InflHigh 0.790 0.0806 9.81 TypeApartment -0.349 0.0757 -4.61 TypeAtrium -0.196 0.0988 -1.98 TypeTerrace -0.698 0.1043 -6.69 ContHigh 0.268 0.0636 4.21 Intercepts: Value Std. Error t value Low|Medium 0.086 0.083 1.038 Medium|High 0.892 0.087 10.223 Residual Deviance: 3491.41 AIC: 3507.41 > summary(update(house.plr, method = "cloglog", Hess = TRUE), digits = 3) Call: polr(formula = Sat ~ Infl + Type + Cont, data = housing, weights = Freq, Hess = TRUE, method = "cloglog") Coefficients: Value Std. Error t value InflMedium 0.382 0.0703 5.44 InflHigh 0.915 0.0926 9.89 TypeApartment -0.407 0.0861 -4.73 TypeAtrium -0.281 0.1111 -2.52 TypeTerrace -0.742 0.1013 -7.33 ContHigh 0.209 0.0651 3.21 Intercepts: Value Std. 
Error t value Low|Medium -0.796 0.090 -8.881 Medium|High 0.055 0.086 0.647 Residual Deviance: 3484.053 AIC: 3500.053 > > predict(house.plr, housing, type = "p") Low Medium High 1 0.3784493 0.2876752 0.3338755 2 0.3784493 0.2876752 0.3338755 3 0.3784493 0.2876752 0.3338755 4 0.2568264 0.2742122 0.4689613 5 0.2568264 0.2742122 0.4689613 6 0.2568264 0.2742122 0.4689613 7 0.1436924 0.2110836 0.6452240 8 0.1436924 0.2110836 0.6452240 9 0.1436924 0.2110836 0.6452240 10 0.5190445 0.2605077 0.2204478 11 0.5190445 0.2605077 0.2204478 12 0.5190445 0.2605077 0.2204478 13 0.3798514 0.2875965 0.3325521 14 0.3798514 0.2875965 0.3325521 15 0.3798514 0.2875965 0.3325521 16 0.2292406 0.2643196 0.5064398 17 0.2292406 0.2643196 0.5064398 18 0.2292406 0.2643196 0.5064398 19 0.4675584 0.2745383 0.2579033 20 0.4675584 0.2745383 0.2579033 21 0.4675584 0.2745383 0.2579033 22 0.3326236 0.2876008 0.3797755 23 0.3326236 0.2876008 0.3797755 24 0.3326236 0.2876008 0.3797755 25 0.1948548 0.2474226 0.5577225 26 0.1948548 0.2474226 0.5577225 27 0.1948548 0.2474226 0.5577225 28 0.6444840 0.2114256 0.1440905 29 0.6444840 0.2114256 0.1440905 30 0.6444840 0.2114256 0.1440905 31 0.5071210 0.2641196 0.2287594 32 0.5071210 0.2641196 0.2287594 33 0.5071210 0.2641196 0.2287594 34 0.3331573 0.2876330 0.3792097 35 0.3331573 0.2876330 0.3792097 36 0.3331573 0.2876330 0.3792097 37 0.2980880 0.2837746 0.4181374 38 0.2980880 0.2837746 0.4181374 39 0.2980880 0.2837746 0.4181374 40 0.1942209 0.2470589 0.5587202 41 0.1942209 0.2470589 0.5587202 42 0.1942209 0.2470589 0.5587202 43 0.1047770 0.1724227 0.7228003 44 0.1047770 0.1724227 0.7228003 45 0.1047770 0.1724227 0.7228003 46 0.4294564 0.2820629 0.2884807 47 0.4294564 0.2820629 0.2884807 48 0.4294564 0.2820629 0.2884807 49 0.2993357 0.2839753 0.4166890 50 0.2993357 0.2839753 0.4166890 51 0.2993357 0.2839753 0.4166890 52 0.1718050 0.2328648 0.5953302 53 0.1718050 0.2328648 0.5953302 54 0.1718050 0.2328648 0.5953302 55 0.3798387 0.2875972 0.3325641 56 0.3798387 
0.2875972 0.3325641 57 0.3798387 0.2875972 0.3325641 58 0.2579546 0.2745537 0.4674917 59 0.2579546 0.2745537 0.4674917 60 0.2579546 0.2745537 0.4674917 61 0.1444202 0.2117081 0.6438717 62 0.1444202 0.2117081 0.6438717 63 0.1444202 0.2117081 0.6438717 64 0.5583813 0.2471826 0.1944361 65 0.5583813 0.2471826 0.1944361 66 0.5583813 0.2471826 0.1944361 67 0.4178031 0.2838213 0.2983756 68 0.4178031 0.2838213 0.2983756 69 0.4178031 0.2838213 0.2983756 70 0.2584149 0.2746916 0.4668935 71 0.2584149 0.2746916 0.4668935 72 0.2584149 0.2746916 0.4668935 > addterm(house.plr, ~.^2, test = "Chisq") Single term additions Model: Sat ~ Infl + Type + Cont Df AIC LRT Pr(Chi) 3495.1 Infl:Type 6 3484.6 22.5093 0.0009786 *** Infl:Cont 2 3498.9 0.2090 0.9007957 Type:Cont 3 3492.5 8.6662 0.0340752 * --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > house.plr2 <- stepAIC(house.plr, ~.^2) Start: AIC=3495.15 Sat ~ Infl + Type + Cont Df AIC + Infl:Type 6 3484.6 + Type:Cont 3 3492.5 3495.1 + Infl:Cont 2 3498.9 - Cont 1 3507.5 - Type 3 3545.1 - Infl 2 3599.4 Step: AIC=3484.64 Sat ~ Infl + Type + Cont + Infl:Type Df AIC + Type:Cont 3 3482.7 3484.6 + Infl:Cont 2 3488.5 - Infl:Type 6 3495.1 - Cont 1 3497.8 Step: AIC=3482.69 Sat ~ Infl + Type + Cont + Infl:Type + Type:Cont Df AIC 3482.7 - Type:Cont 3 3484.6 + Infl:Cont 2 3486.6 - Infl:Type 6 3492.5 > house.plr2$anova Stepwise Model Path Analysis of Deviance Table Initial Model: Sat ~ Infl + Type + Cont Final Model: Sat ~ Infl + Type + Cont + Infl:Type + Type:Cont Step Df Deviance Resid. Df Resid. Dev AIC 1 1673 3479.149 3495.149 2 + Infl:Type 6 22.509347 1667 3456.640 3484.640 3 + Type:Cont 3 7.945029 1664 3448.695 3482.695 > anova(house.plr, house.plr2) Likelihood ratio tests of ordinal regression models Response: Sat Model Resid. df Resid. Dev Test Df 1 Infl + Type + Cont 1673 3479.149 2 Infl + Type + Cont + Infl:Type + Type:Cont 1664 3448.695 1 vs 2 9 LR stat. 
Pr(Chi) 1 2 30.45438 0.0003670555 > > house.plr <- update(house.plr, Hess=TRUE) > pr <- profile(house.plr) > confint(pr) 2.5 % 97.5 % InflMedium 0.3616415 0.77195375 InflHigh 1.0409701 1.53958138 TypeApartment -0.8069590 -0.33940432 TypeAtrium -0.6705862 -0.06204495 TypeTerrace -1.3893863 -0.79533958 ContHigh 0.1733589 0.54792854 > plot(pr) > pairs(pr) > > > > base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly")) > cleanEx() > nameEx("predict.glmmPQL") > ### * predict.glmmPQL > > flush(stderr()); flush(stdout()) > > ### Name: predict.glmmPQL > ### Title: Predict Method for glmmPQL Fits > ### Aliases: predict.glmmPQL > ### Keywords: models > > ### ** Examples > > fit <- glmmPQL(y ~ trt + I(week > 2), random = ~1 | ID, + family = binomial, data = bacteria) iteration 1 iteration 2 iteration 3 iteration 4 iteration 5 iteration 6 > predict(fit, bacteria, level = 0, type="response") [1] 0.9680779 0.9680779 0.8587270 0.8587270 0.9344832 0.9344832 0.7408574 [8] 0.7408574 0.8970307 0.8970307 0.6358511 0.6358511 0.6358511 0.9680779 [15] 0.9680779 0.8587270 0.8587270 0.8587270 0.9680779 0.9680779 0.8587270 [22] 0.8587270 0.8587270 0.8970307 0.8970307 0.6358511 0.6358511 0.9344832 [29] 0.9344832 0.7408574 0.7408574 0.7408574 0.9680779 0.9680779 0.8587270 [36] 0.8587270 0.8587270 0.9680779 0.9680779 0.8587270 0.8587270 0.8587270 [43] 0.9344832 0.7408574 0.9680779 0.9680779 0.8587270 0.8587270 0.8587270 [50] 0.8970307 0.8970307 0.6358511 0.6358511 0.6358511 0.9680779 0.9680779 [57] 0.8587270 0.8587270 0.8587270 0.9680779 0.9680779 0.8587270 0.8970307 [64] 0.8970307 0.6358511 0.6358511 0.6358511 0.9344832 0.9344832 0.7408574 [71] 0.7408574 0.7408574 0.9680779 0.9680779 0.8587270 0.8587270 0.8587270 [78] 0.8970307 0.8970307 0.6358511 0.6358511 0.6358511 0.9680779 0.9680779 [85] 0.8587270 0.8587270 0.8587270 0.9344832 0.9344832 0.7408574 0.7408574 [92] 0.9680779 0.9680779 0.8587270 0.8587270 0.8587270 0.9680779 0.9680779 [99] 0.8587270 0.8587270 
0.8587270 0.9680779 0.9680779 0.8587270 0.8587270 [106] 0.8587270 0.9344832 0.9344832 0.7408574 0.7408574 0.7408574 0.8970307 [113] 0.8970307 0.6358511 0.6358511 0.9680779 0.9680779 0.8587270 0.9680779 [120] 0.9680779 0.8587270 0.8587270 0.8970307 0.8970307 0.6358511 0.6358511 [127] 0.6358511 0.9344832 0.7408574 0.7408574 0.7408574 0.9680779 0.8587270 [134] 0.8587270 0.8587270 0.8970307 0.8970307 0.6358511 0.6358511 0.6358511 [141] 0.9680779 0.9680779 0.8587270 0.8587270 0.8587270 0.9344832 0.7408574 [148] 0.8970307 0.8970307 0.6358511 0.6358511 0.9680779 0.9680779 0.8587270 [155] 0.8970307 0.8970307 0.6358511 0.9680779 0.9680779 0.8587270 0.8587270 [162] 0.8587270 0.9344832 0.9344832 0.7408574 0.7408574 0.7408574 0.9680779 [169] 0.9680779 0.8587270 0.8587270 0.8587270 0.9344832 0.7408574 0.8970307 [176] 0.8970307 0.6358511 0.6358511 0.6358511 0.9344832 0.9344832 0.7408574 [183] 0.7408574 0.9680779 0.9680779 0.8587270 0.8587270 0.8587270 0.8970307 [190] 0.8970307 0.6358511 0.6358511 0.6358511 0.9344832 0.9344832 0.7408574 [197] 0.7408574 0.7408574 0.8970307 0.6358511 0.6358511 0.9344832 0.9344832 [204] 0.7408574 0.7408574 0.7408574 0.8970307 0.8970307 0.6358511 0.6358511 [211] 0.9344832 0.9344832 0.7408574 0.7408574 0.7408574 0.9344832 0.9344832 [218] 0.7408574 0.7408574 0.7408574 attr(,"label") [1] "Predicted values" > predict(fit, bacteria, level = 1, type="response") X01 X01 X01 X01 X02 X02 X02 X02 0.9828449 0.9828449 0.9198935 0.9198935 0.9050782 0.9050782 0.6564944 0.6564944 X03 X03 X03 X03 X03 X04 X04 X04 0.9724022 0.9724022 0.8759665 0.8759665 0.8759665 0.9851548 0.9851548 0.9300763 X04 X04 X05 X05 X05 X05 X05 X06 0.9300763 0.9300763 0.9851548 0.9851548 0.9300763 0.9300763 0.9300763 0.9662755 X06 X06 X06 X07 X07 X07 X07 X07 0.9662755 0.8516962 0.8516962 0.7291679 0.7291679 0.3504978 0.3504978 0.3504978 X08 X08 X08 X08 X08 X09 X09 X09 0.9426815 0.9426815 0.7672499 0.7672499 0.7672499 0.9851548 0.9851548 0.9300763 X09 X09 X10 X10 X11 X11 X11 X11 0.9300763 
0.9300763 0.9640326 0.8430706 0.9851548 0.9851548 0.9300763 0.9300763 X11 X12 X12 X12 X12 X12 X13 X13 0.9300763 0.8334870 0.8334870 0.5008219 0.5008219 0.5008219 0.9851548 0.9851548 X13 X13 X13 X14 X14 X14 X15 X15 0.9300763 0.9300763 0.9300763 0.8907227 0.8907227 0.6203155 0.9724022 0.9724022 X15 X15 X15 X16 X16 X16 X16 X16 0.8759665 0.8759665 0.8759665 0.9287777 0.9287777 0.7232833 0.7232833 0.7232833 X17 X17 X17 X17 X17 X18 X18 X18 0.9426815 0.9426815 0.7672499 0.7672499 0.7672499 0.7070916 0.7070916 0.3260827 X18 X18 X19 X19 X19 X19 X19 X20 0.3260827 0.3260827 0.8702991 0.8702991 0.5735499 0.5735499 0.5735499 0.9736293 X20 X20 X20 X21 X21 X21 X21 X21 0.9736293 0.8809564 0.8809564 0.9851548 0.9851548 0.9300763 0.9300763 0.9300763 Y01 Y01 Y01 Y01 Y01 Y02 Y02 Y02 0.9851548 0.9851548 0.9300763 0.9300763 0.9300763 0.7607971 0.7607971 0.3893126 Y02 Y02 Y03 Y03 Y03 Y03 Y03 Y04 0.3893126 0.3893126 0.8487181 0.8487181 0.5292976 0.5292976 0.5292976 0.5734482 Y04 Y04 Y04 Y05 Y05 Y05 Y06 Y06 0.5734482 0.2122655 0.2122655 0.7144523 0.7144523 0.3339997 0.9828449 0.9828449 Y06 Y06 Y07 Y07 Y07 Y07 Y07 Y08 0.9198935 0.9198935 0.8334870 0.8334870 0.5008219 0.5008219 0.5008219 0.9238389 Y08 Y08 Y08 Y09 Y09 Y09 Y09 Y10 0.7085660 0.7085660 0.7085660 0.9847299 0.9281899 0.9281899 0.9281899 0.9188296 Y10 Y10 Y10 Y10 Y11 Y11 Y11 Y11 0.9188296 0.6940862 0.6940862 0.6940862 0.9851548 0.9851548 0.9300763 0.9300763 Y11 Y12 Y12 Y13 Y13 Y13 Y13 Y14 0.9300763 0.9640326 0.8430706 0.5734482 0.5734482 0.2122655 0.2122655 0.9793383 Y14 Y14 Z01 Z01 Z01 Z02 Z02 Z02 0.9793383 0.9047659 0.9556329 0.9556329 0.8119328 0.9851548 0.9851548 0.9300763 Z02 Z02 Z03 Z03 Z03 Z03 Z03 Z05 0.9300763 0.9300763 0.9779690 0.9779690 0.8989642 0.8989642 0.8989642 0.8702991 Z05 Z05 Z05 Z05 Z06 Z06 Z07 Z07 0.8702991 0.5735499 0.5735499 0.5735499 0.8306525 0.4957505 0.8334870 0.8334870 Z07 Z07 Z07 Z09 Z09 Z09 Z09 Z10 0.5008219 0.5008219 0.5008219 0.9736293 0.9736293 0.8809564 0.8809564 0.9851548 Z10 Z10 Z10 Z10 Z11 Z11 
Z11 Z11 0.9851548 0.9300763 0.9300763 0.9300763 0.9724022 0.9724022 0.8759665 0.8759665 Z11 Z14 Z14 Z14 Z14 Z14 Z15 Z15 0.8759665 0.9287777 0.9287777 0.7232833 0.7232833 0.7232833 0.9643851 0.8444172 Z15 Z19 Z19 Z19 Z19 Z19 Z20 Z20 0.8444172 0.9779690 0.9779690 0.8989642 0.8989642 0.8989642 0.7620490 0.7620490 Z20 Z20 Z24 Z24 Z24 Z24 Z24 Z26 0.3909523 0.3909523 0.8487181 0.8487181 0.5292976 0.5292976 0.5292976 0.9287777 Z26 Z26 Z26 Z26 0.9287777 0.7232833 0.7232833 0.7232833 attr(,"label") [1] "Predicted values" > > > > cleanEx() > nameEx("predict.lda") > ### * predict.lda > > flush(stderr()); flush(stdout()) > > ### Name: predict.lda > ### Title: Classify Multivariate Observations by Linear Discrimination > ### Aliases: predict.lda > ### Keywords: multivariate > > ### ** Examples > > tr <- sample(1:50, 25) > train <- rbind(iris3[tr,,1], iris3[tr,,2], iris3[tr,,3]) > test <- rbind(iris3[-tr,,1], iris3[-tr,,2], iris3[-tr,,3]) > cl <- factor(c(rep("s",25), rep("c",25), rep("v",25))) > z <- lda(train, cl) > predict(z, test)$class [1] s s s s s s s s s s s s s s s s s s s s s s s s s c c c c c c c c c c c c c [39] c c c c c c c c c c c c v v v v v v v v v v v v v v v v v c v v v v v v v Levels: c s v > > > > cleanEx() > nameEx("predict.lqs") > ### * predict.lqs > > flush(stderr()); flush(stdout()) > > ### Name: predict.lqs > ### Title: Predict from an lqs Fit > ### Aliases: predict.lqs > ### Keywords: models > > ### ** Examples > > set.seed(123) > fm <- lqs(stack.loss ~ ., data = stackloss, method = "S", nsamp = "exact") > predict(fm, stackloss) 1 2 3 4 5 6 7 8 35.500000 35.579646 30.409292 19.477876 18.592920 19.035398 19.000000 19.000000 9 10 11 12 13 14 15 16 15.734513 14.079646 13.362832 13.000000 13.920354 13.486726 6.761062 7.000000 17 18 19 20 21 8.557522 8.000000 8.362832 13.154867 23.991150 > > > > cleanEx() > nameEx("predict.qda") > ### * predict.qda > > flush(stderr()); flush(stdout()) > > ### Name: predict.qda > ### Title: Classify from Quadratic 
Discriminant Analysis > ### Aliases: predict.qda > ### Keywords: multivariate > > ### ** Examples > > tr <- sample(1:50, 25) > train <- rbind(iris3[tr,,1], iris3[tr,,2], iris3[tr,,3]) > test <- rbind(iris3[-tr,,1], iris3[-tr,,2], iris3[-tr,,3]) > cl <- factor(c(rep("s",25), rep("c",25), rep("v",25))) > zq <- qda(train, cl) > predict(zq, test)$class [1] s s s s s s s s s s s s s s s s s s s s s s s s s c c c c c c c v c c c c c [39] c c c c c c c c c c c c v v v v v v v v v v v v v v v v v v v v v v v v v Levels: c s v > > > > cleanEx() > nameEx("profile.glm") > ### * profile.glm > > flush(stderr()); flush(stdout()) > > ### Name: profile.glm > ### Title: Method for Profiling glm Objects > ### Aliases: profile.glm > ### Keywords: regression models > > ### ** Examples > > options(contrasts = c("contr.treatment", "contr.poly")) > ldose <- rep(0:5, 2) > numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16) > sex <- factor(rep(c("M", "F"), c(6, 6))) > SF <- cbind(numdead, numalive = 20 - numdead) > budworm.lg <- glm(SF ~ sex*ldose, family = binomial) > pr1 <- profile(budworm.lg) > plot(pr1) > pairs(pr1) > > > > base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly")) > cleanEx() > nameEx("qda") > ### * qda > > flush(stderr()); flush(stdout()) > > ### Name: qda > ### Title: Quadratic Discriminant Analysis > ### Aliases: qda qda.data.frame qda.default qda.formula qda.matrix > ### model.frame.qda print.qda > ### Keywords: multivariate > > ### ** Examples > > tr <- sample(1:50, 25) > train <- rbind(iris3[tr,,1], iris3[tr,,2], iris3[tr,,3]) > test <- rbind(iris3[-tr,,1], iris3[-tr,,2], iris3[-tr,,3]) > cl <- factor(c(rep("s",25), rep("c",25), rep("v",25))) > z <- qda(train, cl) > predict(z,test)$class [1] s s s s s s s s s s s s s s s s s s s s s s s s s c c c c c c c v c c c c c [39] c c c c c c c c c c c c v v v v v v v v v v v v v v v v v v v v v v v v v Levels: c s v > > > > cleanEx() > nameEx("rational") > ### * rational > > flush(stderr()); 
flush(stdout()) > > ### Name: rational > ### Title: Rational Approximation > ### Aliases: rational .rat > ### Keywords: math > > ### ** Examples > > X <- matrix(runif(25), 5, 5) > zapsmall(solve(X, X/5)) # print near-zeroes as zero [,1] [,2] [,3] [,4] [,5] [1,] 0.2 0.0 0.0 0.0 0.0 [2,] 0.0 0.2 0.0 0.0 0.0 [3,] 0.0 0.0 0.2 0.0 0.0 [4,] 0.0 0.0 0.0 0.2 0.0 [5,] 0.0 0.0 0.0 0.0 0.2 > rational(solve(X, X/5)) [,1] [,2] [,3] [,4] [,5] [1,] 0.2 0.0 0.0 0.0 0.0 [2,] 0.0 0.2 0.0 0.0 0.0 [3,] 0.0 0.0 0.2 0.0 0.0 [4,] 0.0 0.0 0.0 0.2 0.0 [5,] 0.0 0.0 0.0 0.0 0.2 > > > > cleanEx() > nameEx("renumerate") > ### * renumerate > > flush(stderr()); flush(stdout()) > > ### Name: renumerate > ### Title: Convert a Formula Transformed by 'denumerate' > ### Aliases: renumerate renumerate.formula > ### Keywords: models > > ### ** Examples > > denumerate(~(1+2+3)^3 + a/b) ~(.v1 + .v2 + .v3)^3 + a/b > ## ~ (.v1 + .v2 + .v3)^3 + a/b > renumerate(.Last.value) ~(`1` + `2` + `3`)^3 + a/b > ## ~ (1 + 2 + 3)^3 + a/b > > > > cleanEx() > nameEx("rlm") > ### * rlm > > flush(stderr()); flush(stdout()) > > ### Name: rlm > ### Title: Robust Fitting of Linear Models > ### Aliases: rlm rlm.default rlm.formula print.rlm predict.rlm psi.bisquare > ### psi.hampel psi.huber > ### Keywords: models robust > > ### ** Examples > > summary(rlm(stack.loss ~ ., stackloss)) Call: rlm(formula = stack.loss ~ ., data = stackloss) Residuals: Min 1Q Median 3Q Max -8.91753 -1.73127 0.06187 1.54306 6.50163 Coefficients: Value Std. Error t value (Intercept) -41.0265 9.8073 -4.1832 Air.Flow 0.8294 0.1112 7.4597 Water.Temp 0.9261 0.3034 3.0524 Acid.Conc. -0.1278 0.1289 -0.9922 Residual standard error: 2.441 on 17 degrees of freedom > rlm(stack.loss ~ ., stackloss, psi = psi.hampel, init = "lts") Call: rlm(formula = stack.loss ~ ., data = stackloss, psi = psi.hampel, init = "lts") Converged in 9 iterations Coefficients: (Intercept) Air.Flow Water.Temp Acid.Conc. 
-40.4747826 0.7410853 1.2250730 -0.1455245 Degrees of freedom: 21 total; 17 residual Scale estimate: 3.09 > rlm(stack.loss ~ ., stackloss, psi = psi.bisquare) Call: rlm(formula = stack.loss ~ ., data = stackloss, psi = psi.bisquare) Converged in 11 iterations Coefficients: (Intercept) Air.Flow Water.Temp Acid.Conc. -42.2852537 0.9275471 0.6507322 -0.1123310 Degrees of freedom: 21 total; 17 residual Scale estimate: 2.28 > > > > cleanEx() > nameEx("rms.curv") > ### * rms.curv > > flush(stderr()); flush(stdout()) > > ### Name: rms.curv > ### Title: Relative Curvature Measures for Non-Linear Regression > ### Aliases: rms.curv print.rms.curv > ### Keywords: nonlinear > > ### ** Examples > > # The treated sample from the Puromycin data > mmcurve <- deriv3(~ Vm * conc/(K + conc), c("Vm", "K"), + function(Vm, K, conc) NULL) > Treated <- Puromycin[Puromycin$state == "treated", ] > (Purfit1 <- nls(rate ~ mmcurve(Vm, K, conc), data = Treated, + start = list(Vm=200, K=0.1))) Nonlinear regression model model: rate ~ mmcurve(Vm, K, conc) data: Treated Vm K 212.68363 0.06412 residual sum-of-squares: 1195 Number of iterations to convergence: 6 Achieved convergence tolerance: 6.096e-06 > rms.curv(Purfit1) Parameter effects: c^theta x sqrt(F) = 0.2121 Intrinsic: c^iota x sqrt(F) = 0.092 > ##Parameter effects: c^theta x sqrt(F) = 0.2121 > ## Intrinsic: c^iota x sqrt(F) = 0.092 > > > > cleanEx() > nameEx("rnegbin") > ### * rnegbin > > flush(stderr()); flush(stdout()) > > ### Name: rnegbin > ### Title: Simulate Negative Binomial Variates > ### Aliases: rnegbin > ### Keywords: distribution > > ### ** Examples > > # Negative Binomials with means fitted(fm) and theta = 4.5 > fm <- glm.nb(Days ~ ., data = quine) > dummy <- rnegbin(fitted(fm), theta = 4.5) > > > > cleanEx() > nameEx("sammon") > ### * sammon > > flush(stderr()); flush(stdout()) > > ### Name: sammon > ### Title: Sammon's Non-Linear Mapping > ### Aliases: sammon > ### Keywords: multivariate > > ### ** Examples > > swiss.x <- 
as.matrix(swiss[, -1]) > swiss.sam <- sammon(dist(swiss.x)) Initial stress : 0.00824 stress after 10 iters: 0.00439, magic = 0.338 stress after 20 iters: 0.00383, magic = 0.500 stress after 30 iters: 0.00383, magic = 0.500 > plot(swiss.sam$points, type = "n") > text(swiss.sam$points, labels = as.character(1:nrow(swiss.x))) > > > > cleanEx() > nameEx("stepAIC") > ### * stepAIC > > flush(stderr()); flush(stdout()) > > ### Name: stepAIC > ### Title: Choose a model by AIC in a Stepwise Algorithm > ### Aliases: stepAIC extractAIC.gls terms.gls extractAIC.lme terms.lme > ### Keywords: models > > ### ** Examples > > quine.hi <- aov(log(Days + 2.5) ~ .^4, quine) > quine.nxt <- update(quine.hi, . ~ . - Eth:Sex:Age:Lrn) > quine.stp <- stepAIC(quine.nxt, + scope = list(upper = ~Eth*Sex*Age*Lrn, lower = ~1), + trace = FALSE) > quine.stp$anova Stepwise Model Path Analysis of Deviance Table Initial Model: log(Days + 2.5) ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Age + Eth:Lrn + Sex:Age + Sex:Lrn + Age:Lrn + Eth:Sex:Age + Eth:Sex:Lrn + Eth:Age:Lrn + Sex:Age:Lrn Final Model: log(Days + 2.5) ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Age + Eth:Lrn + Sex:Age + Sex:Lrn + Age:Lrn + Eth:Sex:Lrn + Eth:Age:Lrn Step Df Deviance Resid. Df Resid. Dev AIC 1 120 64.09900 -68.18396 2 - Eth:Sex:Age 3 0.973869 123 65.07287 -71.98244 3 - Sex:Age:Lrn 2 1.526754 125 66.59962 -72.59652 > > cpus1 <- cpus > for(v in names(cpus)[2:7]) + cpus1[[v]] <- cut(cpus[[v]], unique(quantile(cpus[[v]])), + include.lowest = TRUE) > cpus0 <- cpus1[, 2:8] # excludes names, authors' predictions > cpus.samp <- sample(1:209, 100) > cpus.lm <- lm(log10(perf) ~ ., data = cpus1[cpus.samp,2:8]) > cpus.lm2 <- stepAIC(cpus.lm, trace = FALSE) > cpus.lm2$anova Stepwise Model Path Analysis of Deviance Table Initial Model: log10(perf) ~ syct + mmin + mmax + cach + chmin + chmax Final Model: log10(perf) ~ syct + mmax + cach + chmax Step Df Deviance Resid. Df Resid. 
Dev AIC 1 82 3.458189 -300.4425 2 - chmin 3 0.02548983 85 3.483679 -305.7081 3 - mmin 3 0.12039102 88 3.604070 -308.3106 > > example(birthwt) brthwt> bwt <- with(birthwt, { brthwt+ race <- factor(race, labels = c("white", "black", "other")) brthwt+ ptd <- factor(ptl > 0) brthwt+ ftv <- factor(ftv) brthwt+ levels(ftv)[-(1:2)] <- "2+" brthwt+ data.frame(low = factor(low), age, lwt, race, smoke = (smoke > 0), brthwt+ ptd, ht = (ht > 0), ui = (ui > 0), ftv) brthwt+ }) brthwt> options(contrasts = c("contr.treatment", "contr.poly")) brthwt> glm(low ~ ., binomial, bwt) Call: glm(formula = low ~ ., family = binomial, data = bwt) Coefficients: (Intercept) age lwt raceblack raceother smokeTRUE 0.82302 -0.03723 -0.01565 1.19241 0.74068 0.75553 ptdTRUE htTRUE uiTRUE ftv1 ftv2+ 1.34376 1.91317 0.68020 -0.43638 0.17901 Degrees of Freedom: 188 Total (i.e. Null); 178 Residual Null Deviance: 234.7 Residual Deviance: 195.5 AIC: 217.5 > birthwt.glm <- glm(low ~ ., family = binomial, data = bwt) > birthwt.step <- stepAIC(birthwt.glm, trace = FALSE) > birthwt.step$anova Stepwise Model Path Analysis of Deviance Table Initial Model: low ~ age + lwt + race + smoke + ptd + ht + ui + ftv Final Model: low ~ lwt + race + smoke + ptd + ht + ui Step Df Deviance Resid. Df Resid. Dev AIC 1 178 195.4755 217.4755 2 - ftv 2 1.358185 180 196.8337 214.8337 3 - age 1 1.017866 181 197.8516 213.8516 > birthwt.step2 <- stepAIC(birthwt.glm, ~ .^2 + I(scale(age)^2) + + I(scale(lwt)^2), trace = FALSE) > birthwt.step2$anova Stepwise Model Path Analysis of Deviance Table Initial Model: low ~ age + lwt + race + smoke + ptd + ht + ui + ftv Final Model: low ~ age + lwt + smoke + ptd + ht + ui + ftv + age:ftv + smoke:ui Step Df Deviance Resid. Df Resid. 
Dev AIC 1 178 195.4755 217.4755 2 + age:ftv 2 12.474896 176 183.0006 209.0006 3 + smoke:ui 1 3.056805 175 179.9438 207.9438 4 - race 2 3.129586 177 183.0734 207.0734 > > quine.nb <- glm.nb(Days ~ .^4, data = quine) > quine.nb2 <- stepAIC(quine.nb) Start: AIC=1095.32 Days ~ (Eth + Sex + Age + Lrn)^4 Df AIC - Eth:Sex:Age:Lrn 2 1092.7 1095.3 Step: AIC=1092.73 Days ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Age + Eth:Lrn + Sex:Age + Sex:Lrn + Age:Lrn + Eth:Sex:Age + Eth:Sex:Lrn + Eth:Age:Lrn + Sex:Age:Lrn Df AIC - Eth:Sex:Age 3 1089.4 1092.7 - Eth:Sex:Lrn 1 1093.3 - Eth:Age:Lrn 2 1094.7 - Sex:Age:Lrn 2 1095.0 Step: AIC=1089.41 Days ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Age + Eth:Lrn + Sex:Age + Sex:Lrn + Age:Lrn + Eth:Sex:Lrn + Eth:Age:Lrn + Sex:Age:Lrn Df AIC 1089.4 - Sex:Age:Lrn 2 1091.1 - Eth:Age:Lrn 2 1091.2 - Eth:Sex:Lrn 1 1092.5 > quine.nb2$anova Stepwise Model Path Analysis of Deviance Table Initial Model: Days ~ (Eth + Sex + Age + Lrn)^4 Final Model: Days ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Age + Eth:Lrn + Sex:Age + Sex:Lrn + Age:Lrn + Eth:Sex:Lrn + Eth:Age:Lrn + Sex:Age:Lrn Step Df Deviance Resid. Df Resid. Dev AIC 1 118 167.4535 1095.324 2 - Eth:Sex:Age:Lrn 2 0.09746244 120 167.5509 1092.728 3 - Eth:Sex:Age 3 0.11060087 123 167.4403 1089.409 > > > > cleanEx() > nameEx("summary.negbin") > ### * summary.negbin > > flush(stderr()); flush(stdout()) > > ### Name: summary.negbin > ### Title: Summary Method Function for Objects of Class 'negbin' > ### Aliases: summary.negbin print.summary.negbin > ### Keywords: models > > ### ** Examples > > summary(glm.nb(Days ~ Eth*Age*Lrn*Sex, quine, link = log)) Call: glm.nb(formula = Days ~ Eth * Age * Lrn * Sex, data = quine, link = log, init.theta = 1.928360145) Deviance Residuals: Min 1Q Median 3Q Max -3.2377 -0.9079 -0.2019 0.5173 1.7043 Coefficients: (4 not defined because of singularities) Estimate Std. 
Error z value Pr(>|z|) (Intercept) 3.0564 0.3760 8.128 4.38e-16 *** EthN -0.1386 0.5334 -0.260 0.795023 AgeF1 -0.6227 0.5125 -1.215 0.224334 AgeF2 -2.3632 1.0770 -2.194 0.028221 * AgeF3 -0.3784 0.4546 -0.832 0.405215 LrnSL -1.9577 0.9967 -1.964 0.049493 * SexM -0.4914 0.5104 -0.963 0.335653 EthN:AgeF1 0.1029 0.7123 0.144 0.885175 EthN:AgeF2 -0.5546 1.6798 -0.330 0.741297 EthN:AgeF3 0.0633 0.6396 0.099 0.921159 EthN:LrnSL 2.2588 1.3019 1.735 0.082743 . AgeF1:LrnSL 2.6421 1.0821 2.442 0.014618 * AgeF2:LrnSL 4.8585 1.4423 3.369 0.000755 *** AgeF3:LrnSL NA NA NA NA EthN:SexM -0.7524 0.7220 -1.042 0.297400 AgeF1:SexM 0.4092 0.8299 0.493 0.621973 AgeF2:SexM 3.1098 1.1655 2.668 0.007624 ** AgeF3:SexM 1.1145 0.6365 1.751 0.079926 . LrnSL:SexM 1.5900 1.1499 1.383 0.166750 EthN:AgeF1:LrnSL -3.5493 1.4270 -2.487 0.012876 * EthN:AgeF2:LrnSL -3.3315 2.0919 -1.593 0.111256 EthN:AgeF3:LrnSL NA NA NA NA EthN:AgeF1:SexM -0.3105 1.2055 -0.258 0.796735 EthN:AgeF2:SexM 0.3469 1.7965 0.193 0.846875 EthN:AgeF3:SexM 0.8329 0.8970 0.929 0.353092 EthN:LrnSL:SexM -0.1639 1.5250 -0.107 0.914411 AgeF1:LrnSL:SexM -2.4285 1.4201 -1.710 0.087246 . AgeF2:LrnSL:SexM -4.1914 1.6201 -2.587 0.009679 ** AgeF3:LrnSL:SexM NA NA NA NA EthN:AgeF1:LrnSL:SexM 2.1711 1.9192 1.131 0.257963 EthN:AgeF2:LrnSL:SexM 2.1029 2.3444 0.897 0.369718 EthN:AgeF3:LrnSL:SexM NA NA NA NA --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for Negative Binomial(1.9284) family taken to be 1) Null deviance: 272.29 on 145 degrees of freedom Residual deviance: 167.45 on 118 degrees of freedom AIC: 1097.3 Number of Fisher Scoring iterations: 1 Theta: 1.928 Std. 
Err.: 0.269 2 x log-likelihood: -1039.324 > > > > cleanEx() > nameEx("summary.rlm") > ### * summary.rlm > > flush(stderr()); flush(stdout()) > > ### Name: summary.rlm > ### Title: Summary Method for Robust Linear Models > ### Aliases: summary.rlm print.summary.rlm > ### Keywords: robust > > ### ** Examples > > summary(rlm(calls ~ year, data = phones, maxit = 50)) Call: rlm(formula = calls ~ year, data = phones, maxit = 50) Residuals: Min 1Q Median 3Q Max -18.314 -5.953 -1.681 26.460 173.769 Coefficients: Value Std. Error t value (Intercept) -102.6222 26.6082 -3.8568 year 2.0414 0.4299 4.7480 Residual standard error: 9.032 on 22 degrees of freedom > > > > cleanEx() > nameEx("theta.md") > ### * theta.md > > flush(stderr()); flush(stdout()) > > ### Name: theta.md > ### Title: Estimate theta of the Negative Binomial > ### Aliases: theta.md theta.ml theta.mm > ### Keywords: models > > ### ** Examples > > quine.nb <- glm.nb(Days ~ .^2, data = quine) > theta.md(quine$Days, fitted(quine.nb), dfr = df.residual(quine.nb)) [1] 1.135441 > theta.ml(quine$Days, fitted(quine.nb)) [1] 1.603641 attr(,"SE") [1] 0.2138379 > theta.mm(quine$Days, fitted(quine.nb), dfr = df.residual(quine.nb)) [1] 1.562879 > > ## weighted example > yeast <- data.frame(cbind(numbers = 0:5, fr = c(213, 128, 37, 18, 3, 1))) > fit <- glm.nb(numbers ~ 1, weights = fr, data = yeast) > summary(fit) Call: glm.nb(formula = numbers ~ 1, data = yeast, weights = fr, init.theta = 3.586087428, link = log) Deviance Residuals: 1 2 3 4 5 6 -16.314 3.682 6.923 7.555 4.033 2.813 Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) -0.38199 0.06603 -5.785 7.25e-09 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for Negative Binomial(3.5861) family taken to be 1) Null deviance: 408.9 on 5 degrees of freedom Residual deviance: 408.9 on 5 degrees of freedom AIC: 897.06 Number of Fisher Scoring iterations: 1 Theta: 3.59 Std. 
Err.: 1.75 2 x log-likelihood: -893.063 > mu <- fitted(fit) > theta.md(yeast$numbers, mu, dfr = 399, weights = yeast$fr) [1] 3.027079 > theta.ml(yeast$numbers, mu, limit = 15, weights = yeast$fr) [1] 3.586087 attr(,"SE") [1] 1.749609 > theta.mm(yeast$numbers, mu, dfr = 399, weights = yeast$fr) [1] 3.549593 > > > > cleanEx() > nameEx("ucv") > ### * ucv > > flush(stderr()); flush(stdout()) > > ### Name: ucv > ### Title: Unbiased Cross-Validation for Bandwidth Selection > ### Aliases: ucv > ### Keywords: dplot > > ### ** Examples > > ucv(geyser$duration) Warning in ucv(geyser$duration) : minimum occurred at one end of the range [1] 0.1746726 > > > > cleanEx() > nameEx("waders") > ### * waders > > flush(stderr()); flush(stdout()) > > ### Name: waders > ### Title: Counts of Waders at 15 Sites in South Africa > ### Aliases: waders > ### Keywords: datasets > > ### ** Examples > > plot(corresp(waders, nf=2)) > > > > cleanEx() > nameEx("whiteside") > ### * whiteside > > flush(stderr()); flush(stdout()) > > ### Name: whiteside > ### Title: House Insulation: Whiteside's Data > ### Aliases: whiteside > ### Keywords: datasets > > ### ** Examples > > require(lattice) Loading required package: lattice > xyplot(Gas ~ Temp | Insul, whiteside, panel = + function(x, y, ...) { + panel.xyplot(x, y, ...) + panel.lmline(x, y, ...) + }, xlab = "Average external temperature (deg. C)", + ylab = "Gas consumption (1000 cubic feet)", aspect = "xy", + strip = function(...) strip.default(..., style = 1)) > > gasB <- lm(Gas ~ Temp, whiteside, subset = Insul=="Before") > gasA <- update(gasB, subset = Insul=="After") > summary(gasB) Call: lm(formula = Gas ~ Temp, data = whiteside, subset = Insul == "Before") Residuals: Min 1Q Median 3Q Max -0.62020 -0.19947 0.06068 0.16770 0.59778 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 6.85383 0.11842 57.88 <2e-16 *** Temp -0.39324 0.01959 -20.08 <2e-16 *** --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Residual standard error: 0.2813 on 24 degrees of freedom Multiple R-squared: 0.9438, Adjusted R-squared: 0.9415 F-statistic: 403.1 on 1 and 24 DF, p-value: < 2.2e-16 > summary(gasA) Call: lm(formula = Gas ~ Temp, data = whiteside, subset = Insul == "After") Residuals: Min 1Q Median 3Q Max -0.97802 -0.11082 0.02672 0.25294 0.63803 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 4.72385 0.12974 36.41 < 2e-16 *** Temp -0.27793 0.02518 -11.04 1.05e-11 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Residual standard error: 0.3548 on 28 degrees of freedom Multiple R-squared: 0.8131, Adjusted R-squared: 0.8064 F-statistic: 121.8 on 1 and 28 DF, p-value: 1.046e-11 > gasBA <- lm(Gas ~ Insul/Temp - 1, whiteside) > summary(gasBA) Call: lm(formula = Gas ~ Insul/Temp - 1, data = whiteside) Residuals: Min 1Q Median 3Q Max -0.97802 -0.18011 0.03757 0.20930 0.63803 Coefficients: Estimate Std. Error t value Pr(>|t|) InsulBefore 6.85383 0.13596 50.41 <2e-16 *** InsulAfter 4.72385 0.11810 40.00 <2e-16 *** InsulBefore:Temp -0.39324 0.02249 -17.49 <2e-16 *** InsulAfter:Temp -0.27793 0.02292 -12.12 <2e-16 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Residual standard error: 0.323 on 52 degrees of freedom Multiple R-squared: 0.9946, Adjusted R-squared: 0.9942 F-statistic: 2391 on 4 and 52 DF, p-value: < 2.2e-16 > > gasQ <- lm(Gas ~ Insul/(Temp + I(Temp^2)) - 1, whiteside) > coef(summary(gasQ)) Estimate Std. 
Error t value Pr(>|t|) InsulBefore 6.759215179 0.150786777 44.826312 4.854615e-42 InsulAfter 4.496373920 0.160667904 27.985514 3.302572e-32 InsulBefore:Temp -0.317658735 0.062965170 -5.044991 6.362323e-06 InsulAfter:Temp -0.137901603 0.073058019 -1.887563 6.489554e-02 InsulBefore:I(Temp^2) -0.008472572 0.006624737 -1.278930 2.068259e-01 InsulAfter:I(Temp^2) -0.014979455 0.007447107 -2.011446 4.968398e-02 > > gasPR <- lm(Gas ~ Insul + Temp, whiteside) > anova(gasPR, gasBA) Analysis of Variance Table Model 1: Gas ~ Insul + Temp Model 2: Gas ~ Insul/Temp - 1 Res.Df RSS Df Sum of Sq F Pr(>F) 1 53 6.7704 2 52 5.4252 1 1.3451 12.893 0.0007307 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > options(contrasts = c("contr.treatment", "contr.poly")) > gasBA1 <- lm(Gas ~ Insul*Temp, whiteside) > coef(summary(gasBA1)) Estimate Std. Error t value Pr(>|t|) (Intercept) 6.8538277 0.13596397 50.409146 7.997414e-46 InsulAfter -2.1299780 0.18009172 -11.827185 2.315921e-16 Temp -0.3932388 0.02248703 -17.487358 1.976009e-23 InsulAfter:Temp 0.1153039 0.03211212 3.590665 7.306852e-04 > > > > base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly")) > cleanEx() detaching ‘package:lattice’ > nameEx("width.SJ") > ### * width.SJ > > flush(stderr()); flush(stdout()) > > ### Name: width.SJ > ### Title: Bandwidth Selection by Pilot Estimation of Derivatives > ### Aliases: width.SJ > ### Keywords: dplot > > ### ** Examples > > width.SJ(geyser$duration, method = "dpi") [1] 0.5747852 > width.SJ(geyser$duration) [1] 0.360518 > > width.SJ(galaxies, method = "dpi") [1] 3256.151 > width.SJ(galaxies) [1] 2566.423 > > > > cleanEx() > nameEx("wtloss") > ### * wtloss > > flush(stderr()); flush(stdout()) > > ### Name: wtloss > ### Title: Weight Loss Data from an Obese Patient > ### Aliases: wtloss > ### Keywords: datasets > > ### ** Examples > > ## IGNORE_RDIFF_BEGIN > wtloss.fm <- nls(Weight ~ b0 + b1*2^(-Days/th), + data = wtloss, start = list(b0=90, b1=95, 
th=120)) > wtloss.fm Nonlinear regression model model: Weight ~ b0 + b1 * 2^(-Days/th) data: wtloss b0 b1 th 81.37 102.68 141.91 residual sum-of-squares: 39.24 Number of iterations to convergence: 3 Achieved convergence tolerance: 4.324e-06 > ## IGNORE_RDIFF_END > plot(wtloss) > with(wtloss, lines(Days, fitted(wtloss.fm))) > > > > ### *