MASS/0000755000176000001440000000000012620401172011040 5ustar ripleyusersMASS/po/0000755000176000001440000000000012315222230011453 5ustar ripleyusersMASS/po/R-fr.po0000644000176000001440000004470512312015432012634 0ustar ripleyusers# Translation of R-MASS.pot to French # Copyright (C) 2005 The R Foundation # This file is distributed under the same license as the MASS R package. # Philippe Grosjean , 2005. # msgid "" msgstr "" "Project-Id-Version: MASS 7.2-20\n" "Report-Msgid-Bugs-To: bugs@r-project.org\n" "POT-Creation-Date: 2013-03-18 09:49\n" "PO-Revision-Date: 2014-03-18 11:08+0100\n" "Last-Translator: Philippe Grosjean \n" "Language-Team: French \n" "Language: fr\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=ISO-8859-1\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "X-Generator: Poedit 1.6.4\n" msgid "no terms in scope" msgstr "aucun terme dans la portée de la formule" msgid "no terms in scope for adding to object" msgstr "aucun terme dans la portée de la formule à ajouter à l'objet" msgid "trying + %s" msgstr "essai de + %s" msgid "number of rows in use has changed: remove missing values?" msgstr "" "le nombre de lignes utilisées a changé : retirer les valeurs manquantes ?" 
msgid "F test assumes 'quasi%s' family" msgstr "le test F suppose la famille 'quasi%s'" msgid "no 'addterm' method implemented for \"mlm\" models" msgstr "pas de méthode 'addterm' implémentée pour les modèles\" mlm\"" msgid "scope is not a subset of term labels" msgstr "" "la portée de la formule n'est pas un sous-ensemble des étiquettes des termes" msgid "trying - %s" msgstr "essai de - %s" msgid "'dropterm' not implemented for \"mlm\" fits" msgstr "'dropterm' n'est pas implémenté pour les ajustements \"mlm\"" msgid "iteration limit reached near 'x = %f'" msgstr "nombre limite d'iterations atteint vers x = %f" msgid "%s does not have both 'qr' and 'y' components" msgstr "%s n'a pas les composantes 'qr' et 'y'" msgid "response variable must be positive" msgstr "la variable réponse doit être positive" msgid "Waiting for profiling to be done..." msgstr "Attente de la réalisation du profilage..." msgid "invalid number of levels" msgstr "nombre de niveaux incorrect" msgid "frequency table is %d-dimensional" msgstr "le tableau de fréquences a %d dimensions" msgid "invalid table specification" msgstr "spécification de table de contingence incorrecte" msgid "higher-way table requested. Only 2-way allowed" msgstr "" "nombre de dimensions de la table insuffisant. 
Seuls deux dimensions sont " "admises" msgid "negative or non-integer entries in table" msgstr "valeurs négatives ou non entières dans le tableau" msgid "all frequencies are zero" msgstr "toutes les fréquences sont nulles" msgid "empty row or column in table" msgstr "ligne ou colonne vide dans la table" msgid "biplot is only possible if nf >= 2" msgstr "le biplot est possible seulement lorsque nf >= 2" msgid "missing or infinite values in 'x'" msgstr "valeurs manquantes ou infinies dans 'x'" msgid "length of 'wt' must equal number of observations" msgstr "la longueur de 'wt' doit être égale au nombre d'observations" msgid "negative weights not allowed" msgstr "poids négatifs non admis" msgid "no positive weights" msgstr "aucun poids positif" msgid "'center' is not the right length" msgstr "'center' n'a pas la bonne longueur" msgid "Probable convergence failure" msgstr "Absence probable de convergence" msgid "'uin' is too large to fit plot in" msgstr "'uin' est trop grand pour y ajuster le graphique" msgid "'x' must be a non-empty numeric vector" msgstr "'x' doit être un vecteur numérique non vide" msgid "'x' contains missing or infinite values" msgstr "valeurs manquantes ou infinies dans 'x'" msgid "'densfun' must be supplied as a function or name" msgstr "'densfun' doit être fourni comme une fonction ou comme un nom" msgid "unsupported distribution" msgstr "distribution non supportée" msgid "supplying pars for the %s distribution is not supported" msgstr "fixer des paramètres pour distribution %s n'est pas supporté" msgid "need positive values to fit a log-Normal" msgstr "des valeurs positives sont requises pour ajuster un modèle log-Normal" msgid "Exponential values must be >= 0" msgstr "Les valeurs exponentielles doivent être >= 0" msgid "Weibull values must be > 0" msgstr "Les valeurs Weibull doivent être > 0" msgid "gamma values must be >= 0" msgstr "Les valeurs gamma doivent être >= 0" msgid "'start' must be a named list" msgstr "'start' doit être une liste 
nommée" msgid "'start' specifies names which are not arguments to 'densfun'" msgstr "'start' spécifie des noms qui ne sont pas des arguments de 'densfun'" msgid "optimization failed" msgstr "l'optimisation a échoué" msgid "only 'REML = FALSE' is implemented" msgstr "seulement 'REML = FALSE' est implémenté" msgid "Initial estimate: %s" msgstr "Estimations initiales : %s" msgid "Iter. %d Alpha: %s" msgstr "Itér. %d Alpha : %s" msgid "iteration limit reached" msgstr "nombre limite d'iterations atteint" msgid "package 'nlme' is essential" msgstr "le package 'nlme' est essentiel" msgid "'family' not recognized" msgstr "'family' non reconnu" msgid "iteration %d" msgstr "itération %d" msgid "'anova' is not available for PQL fits" msgstr "'anova' n'est pas disponible pour un ajustement PQL" msgid "cannot estimate scale: MAD is zero for this sample" msgstr "impossible d'estimer l'échelle : MAD est nul pour cet échantillon" msgid "an initial configuration must be supplied with NA/Infs in 'd'" msgstr "une configuration initiale doit être fournie avec NA/Infs dans 'd'" msgid "'y' must be a matrix" msgstr "'y' doit être une matrice" msgid "distances must be result of 'dist' or a square matrix" msgstr "les distances doivent provenir de 'dist', ou être une matrice carrée" msgid "invalid size" msgstr "taille incorrecte" msgid "zero or negative distance between objects %d and %d" msgstr "distance négative ou nulle entre les objets %d et %d" msgid "not enough non-missing data" msgstr "pas assez de données non manquantes" msgid "invalid initial configuration" msgstr "configuration initale incorrecte" msgid "initial configuration must be complete" msgstr "la configuration initiale doit être complète" msgid "invalid row(x)" msgstr "row(x) incorrect" msgid "invalid length(d)" msgstr "length(d) incorrect" msgid "data vectors must be the same length" msgstr "les vecteurs de données doivent avoir la même longueur" msgid "missing or infinite values in the data are not allowed" msgstr "les 
valeurs manquantes ou infinies ne sont pas admises" msgid "only finite values are allowed in 'lims'" msgstr "seules des valeurs finies sont autorisées pour 'lims'" msgid "bandwidths must be strictly positive" msgstr "les largeurs de bandes doivent être strictement positives" msgid "'x' is not a matrix" msgstr "'x' n'est pas une matrice" msgid "infinite, NA or NaN values in 'x'" msgstr "valeurs infinies, NA ou NaN dans 'x'" msgid "nrow(x) and length(grouping) are different" msgstr "nrow(x) et length(grouping) sont différents" msgid "invalid 'prior'" msgstr "'prior' incorrect" msgid "'prior' is of incorrect length" msgstr "'prior' est de longueur incorrecte" msgid "cannot use leave-one-out CV with method %s" msgstr "impossible d'utiliser la VC par eustachage pouri %s" msgid "rank = 0: variables are numerically constant" msgstr "rank = 0 : les variables sont numériquement constantes" msgid "variables are collinear" msgstr "les variables sont collinéaires" msgid "'nu' must exceed 2" msgstr "'nu' doit être supérieur à 2" msgid "group means are numerically identical" msgstr "les moyennes par groupes sont numériquement identiques" msgid "object not of class \"lda\"" msgstr "l'objet n'est pas de la classe \"lda\"" msgid "wrong number of variables" msgstr "nombre de variables incorrect" msgid "variable names in 'newdata' do not match those in 'object'" msgstr "" "les noms des variables de 'newdata' ne correspondent pas à ceux de 'object'" msgid "'breaks' must be strictly increasing" msgstr "'breaks' doit être strictement croissant" msgid "'breaks' do not cover the data" msgstr "'breaks' ne recouvrent pas toutes les données" msgid "dim(W) is not correct" msgstr "dim(W) est incorrect" msgid "'W' is not positive definite" msgstr "'W' n'est pas définie positive" msgid "'data' has no 'terms' attribute" msgstr "'data' n'a pas d'attribut 'terms'" msgid "formula specifies no response" msgstr "la formule ne spécifie aucune réponse" msgid "'object' has no 'call' component. 
Updating not possible" msgstr "'object' n'a pas de composante 'call'. Mise à jour impossible" msgid "Response variable must be positive after additions" msgstr "La variable de réponse doit être positive après les additions" msgid "missing values are not allowed" msgstr "les valeurs manquantes ne sont pas admises" msgid "'x' and 'y' must have the same number of rows" msgstr "'x' et 'y' doivent avoir le même nombre de lignes" msgid "'quantile' must be at most %d" msgstr "'quantile' doit valoir au plus %d" msgid "'ps' must be at least 'p'" msgstr "'ps' doit valoir au moins 'p'" msgid "'lqs' failed: all the samples were singular" msgstr "'lqs' a échoué : tous les échantillons étaient singuliers" msgid "missing or infinite values are not allowed" msgstr "les valeurs manquantes ou infinies ne sont pas admises" msgid "at least %d cases are needed" msgstr "il faut au moins %d cas" msgid "'quantile' must be at least %d" msgstr "'quantile' doit valoir au moins %d" msgid "at least one column has IQR 0" msgstr "au moins une des colonnes a un EIQ de 0" msgid "'x' is probably collinear" msgstr "'x' est probablement collinéaire" msgid "all variables must be factors" msgstr "toutes les variables doivent être des facteurs" msgid "factors in 'newdata' do not match those for 'object'" msgstr "les facteurs de 'newdata' ne correspondent pas à ceux de 'object'" msgid "'newdata' is not of the right length" msgstr "'newdata' n'a pas la bonne longueur" msgid "'X' must be a numeric or complex matrix" msgstr "'X' doit être une matrice numérique ou complexe" msgid "incompatible arguments" msgstr "arguments incompatibles" msgid "'Sigma' is not positive definite" msgstr "'Sigma' n'est définie positive" msgid "'theta' must be given" msgstr "'theta' doit être fourni" msgid "negative values not allowed for the negative binomial family" msgstr "valeurs négatives non autorisées pour la famille binomiale négative" msgid "tests made without re-estimating 'theta'" msgstr "tests réalisés sans ré-estimer 
'theta'" msgid "only Chi-squared LR tests are implemented" msgstr "seuls les tests Chi-deux LR sont implémentés" msgid "not all objects are of class \"negbin\"" msgstr "tous les objets ne sont pas de la classe \"negbin\"" msgid "unimplemented method: %s" msgstr "méthode non implémentée: %s" msgid "Initial fit:" msgstr "Ajustement initial :" msgid "Initial value for 'theta': %f" msgstr "Valeur initiale pour 'theta' : %f" msgid "alternation limit reached" msgstr "limite d'alternation atteinte" msgid "'theta' must be specified" msgstr "'theta' doit être spécifié" msgid "" "\"%s\" link not available for negative binomial family; available links are " "\"identity\", \"log\" and \"sqrt\"" msgstr "" "lien \"%s\" non disponible pour la famille binomiale négative ; les liens " "possibles sont \"identity\", \"log\" et \"sqrt\"" msgid "estimate truncated at zero" msgstr "estimation tronquée à zéro" msgid "theta.ml: iter" msgstr "theta.lm : itér" msgid "theta =" msgstr "theta =" msgid "extra arguments discarded" msgstr "arguments supplémentaires ignorés" msgid "at least 3 distinct 'x' values are needed" msgstr "il faut au moins 3 valeurs différentes de 'x'" msgid "an intercept is needed and assumed" msgstr "une coordonnée à l'origine est nécessaire et assumée" msgid "response must be a factor" msgstr "la réponse doit être un facteur" msgid "response must have 3 or more levels" msgstr "la réponse doit avoir au moins 3 niveaux" msgid "attempt to find suitable starting values failed" msgstr "la recherche de valeurs de départ correctes a échouée" msgid "design appears to be rank-deficient, so dropping some coefs" msgstr "le plan ne semble pas de rang plein, des coefs seront ignorés" msgid "'start' is not of the correct length" msgstr "'start' n'a pas la bonne longueur" msgid "Re-fitting to get Hessian" msgstr "Réajustement pour obtenir le Hessien" msgid "not a \"polr\" object" msgstr "ce n'est pas un objet \"polr\"" msgid "anova is not implemented for a single \"polr\" object" 
msgstr "l'anova n'est pas implémentée pour un objet \"polr\"" msgid "not all objects are of class \"polr\"" msgstr "tous les objets ne sont pas de la classe \"polr\"" msgid "models were not all fitted to the same size of dataset" msgstr "" "les modèles n'ont pas été ajustés à des jeux de données de même dimension" msgid "Parameter:" msgstr "Paramètre :" msgid "down" msgstr "vers le bas" msgid "up" msgstr "vers le haut" msgid "" "profiling has found a better solution, so original fit had not converged" msgstr "" "le profilage a donné une meilleure solution, l'ajustement original n'avait " "donc pas convergé" msgid "weighted fits are not supported" msgstr "les ajustements pondérés ne sont pas supportés" msgid "some group is too small for 'qda'" msgstr "un groupe est trop petit pour 'qda'" msgid "rank deficiency in group %s" msgstr "groupe %s n'est pas de rang plein" msgid "object not of class \"qda\"" msgstr "cet objet n'est pas de la calsse \"qda\"" msgid "cannot have leave-one-out CV with 'newdata'" msgstr "VC par eustachage impossible avec 'newdata'" msgid "'x' is singular: singular fits are not implemented in 'rlm'" msgstr "" "'x' est singulière : les ajustements singuliers ne sont pas implémentés dans " "'rlm'" msgid "invalid 'test.vec'" msgstr "'test.vec' incorrect" msgid "length of 'weights' must equal number of observations" msgstr "la longueur de 'weights' doit être égale au nombre d'observations " msgid "negative 'weights' value" msgstr "valeur de 'weights' négative" msgid "some of ... do not match" msgstr "des éléments de ... 
ne correspondent pas" msgid "'init' method is unknown" msgstr "méthode 'init' inconnue" msgid "'c' must be at least 1.548 and has been ignored" msgstr "'c' doit valoir au moins 1.548 et il a été ignoré " msgid "'method' is unknown" msgstr "'method' est inconnu" msgid "'rlm' failed to converge in %d steps" msgstr "la convergence de 'rlm' a échoué après %d étapes" msgid "'coef' must define a contrast, i.e., sum to 0" msgstr "'coef' doit définir un contraste, sa somme doit donc être nulle" msgid "'coef' must have same length as 'contrast.obj'" msgstr "'coef' doit être de la même longueur que 'contrast.obj'" msgid "each element of '%s' must be logical" msgstr "tous les éléments de '%s' doivent être des valeurs logiques" msgid "the contrast defined is empty (has no TRUE elements)" msgstr "le contraste défini est vide (aucun élément TRUE)" msgid "columns of 'contrast.obj' must define a contrast (sum to zero)" msgstr "" "les colonnes de 'contrast.obj' doivent définir un contraste (somme nulle)" msgid "\"gradient\" attribute missing" msgstr "attribut \"gradient\" manquant" msgid "\"hessian\" attribute missing" msgstr "attribut \"hessian\" manquant" msgid "regression apparently linear" msgstr "regression apparemment linéaire" msgid "Infs not allowed in 'd'" msgstr "Infs interdits dans 'd'" msgid "an initial configuration must be supplied if there are NAs in 'd'" msgstr "" "une configuration initiale doit être fournie si il y a des NAs dans 'd'" msgid "'use.start' cannot be used with R's version of 'glm'" msgstr "'use.start' n'est pas utilisable dans la version R de 'glm'" msgid "AIC is not defined for this model, so 'stepAIC' cannot proceed" msgstr "AIC n'est pas défini pour ce modèle, 'stepAIC' ne peut poursuivre" msgid "AIC is -infinity for this model, so 'stepAIC' cannot proceed" msgstr "AIC vaut -Inf pour ce modèle, 'stepAIC' ne peut poursuivre" msgid "0 df terms are changing AIC" msgstr "les termes à 0 ddl changent l'AIC" msgid "AIC undefined for REML fit" msgstr "AIC 
non défini pour un ajustement REML" msgid "'nbins' must result in a positive integer" msgstr "'nbins' doit renvoyer un entier positif" msgid "'h' must be strictly positive" msgstr "'h' doit être strictement positif" msgid "uneven breaks with 'prob = FALSE' will give a misleading plot" msgstr "" "des limites irrégulières avec 'prob = FALSE' donneront un graphique trompeur" msgid "'x' has length zero" msgstr "'x' est de longueur nulle" msgid "no solution in the specified range of bandwidths" msgstr "pas de solution dans l'intervalle de largeur de classes" msgid "minimum occurred at one end of the range" msgstr "le minimum est atteint à une extrémité de l'intervalle" msgid "using the %d/%d row from a combined fit" msgid_plural "using the %d/%d rows from a combined fit" msgstr[0] "utilisation de %d/%d ligne pour un ajustement combiné" msgstr[1] "utilisation des %d/%d lignes pour un ajustement combiné" msgid "group %s is empty" msgid_plural "groups %s are empty" msgstr[0] "le groupe %s est vide" msgstr[1] "les groupes %s sont vides" msgid "variable %s appears to be constant within groups" msgid_plural "variables %s appear to be constant within groups" msgstr[0] "la variable %s semble être constante à l'intérieur des groupes" msgstr[1] "les variables %s semblent être constantes à l'intérieur des groupes" msgid "only %d set, so all sets will be tried" msgid_plural "only %d sets, so all sets will be tried" msgstr[0] "seulement %d ensemble, donc tous les ensembles seront essayés" msgstr[1] "seulement %d ensembles, donc tous les ensembles seront essayés" msgid "%d missing observation deleted" msgid_plural "%d missing observations deleted" msgstr[0] "%d observation manquante supprimée" msgstr[1] "%d observations manquantes supprimées" msgid "%d row with zero weights not counted" msgid_plural "%d rows with zero weights not counted" msgstr[0] "%d ligne de poids nul non comptabilisée" msgstr[1] "%d lignes de poids nul non comptabilisées" #~ msgid "negative values not allowed for 
the Negative Binomal family" #~ msgstr "valeurs négatives non autorisées pour la famille binomiale négative" #~ msgid "supplying pars for the log-Normal is not supported" #~ msgstr "fixer des paramètres pour log-Normal n'est pas supporté" #~ msgid "supplying pars for the Normal is not supported" #~ msgstr "fixer des paramètres pour Normal n'est pas supporté" #~ msgid "supplying pars for the exponential is not supported" #~ msgstr "fixer des paramètres pour exponential n'est pas supporté" #~ msgid "supplying pars for the geometric is not supported" #~ msgstr "fixer des paramètres pour geometric n'est pas supporté" #~ msgid "F test assumes quasi%s family" #~ msgstr "le test F suppose la famille quasi%s" #~ msgid "missing observations deleted" #~ msgstr "observations manquantes supprimées" MASS/po/R-de.po0000644000176000001440000004356512312050163012620 0ustar ripleyusers# Translation of src/library/Recommended/MASS/R-MASS.pot to German # Copyright (C) 2007-2014 The R Foundation # This file is distributed under the same license as the lattice R package. # Chris Leick , 2009-2012. # Detlef Steuer , 2012-2014 msgid "" msgstr "" "Project-Id-Version: R 3.1.0 / MASS 7.3-30\n" "Report-Msgid-Bugs-To: bugs@r-project.org\n" "POT-Creation-Date: 2013-03-18 09:49\n" "PO-Revision-Date: 2014-03-16 16:32+0100\n" "Last-Translator: Detlef Steuer \n" "Language-Team: German \n" "Language: de\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" msgid "no terms in scope" msgstr "keine Bedingungen im Geltungsbereich" msgid "no terms in scope for adding to object" msgstr "keine Bedingungen im Geltungsbereich, um ein Objekt hinzuzufügen" msgid "trying + %s" msgstr "versuche + %s" msgid "number of rows in use has changed: remove missing values?" msgstr "" "Anzahl der benutzten Zeilen hat sich geändert: Fehlende Werte entfernen?" 
msgid "F test assumes 'quasi%s' family" msgstr "F-Test unterstellt eine 'quasi-%s'-Familie" msgid "no 'addterm' method implemented for \"mlm\" models" msgstr "keine 'addterm'-Methode für \"mlm\"-Modelle implementiert" msgid "scope is not a subset of term labels" msgstr "Geltungsbereich ist keine Untermenge von Bedingungsbeschriftungen" msgid "trying - %s" msgstr "versuche - %s" msgid "'dropterm' not implemented for \"mlm\" fits" msgstr "'dropterm' nicht für \"mlm\"-Anpassungen implementiert" msgid "iteration limit reached near 'x = %f'" msgstr "Iterationsgrenzwert erreicht bei 'x = %f'" msgid "%s does not have both 'qr' and 'y' components" msgstr "%s hat nicht sowohl 'qr'- als auch 'y'-Komponenten" msgid "response variable must be positive" msgstr "Rückmeldungsvariable muss positiv sein" msgid "Waiting for profiling to be done..." msgstr "Es wird auf das Profilieren gewartet ..." msgid "invalid number of levels" msgstr "falsche Anzahl der Stufen" msgid "frequency table is %d-dimensional" msgstr "Frequenztabelle ist %d-dimensional" msgid "invalid table specification" msgstr "ungültige Tabellenangabe" msgid "higher-way table requested. 
Only 2-way allowed" msgstr "Mehr als zwei-wege Tabelle angefordert, nur zwei-wege erlaubt" msgid "negative or non-integer entries in table" msgstr "negative oder nicht ganzzahlige Einträge in Tabelle" msgid "all frequencies are zero" msgstr "alle Frequenzen sind Null" msgid "empty row or column in table" msgstr "Leere Zeile oder Spalte in Tabelle" msgid "biplot is only possible if nf >= 2" msgstr "Biplot ist nur möglich, wenn nf >= 2" msgid "missing or infinite values in 'x'" msgstr "fehlende oder unendliche Werte in 'x'" msgid "length of 'wt' must equal number of observations" msgstr "Länge von 'wt' muss der Anzahl der Beobachtungen entsprechen" msgid "negative weights not allowed" msgstr "negative Gewichte nicht erlaubt" msgid "no positive weights" msgstr "keine positiven Gewichte" msgid "'center' is not the right length" msgstr "'center' ist nicht die richtige Länge" msgid "Probable convergence failure" msgstr "Wahrscheinlich konvergierte das Verfahren nicht" msgid "'uin' is too large to fit plot in" msgstr "'uin' ist zu groß, um in die grafische Darstellung zu passen" msgid "'x' must be a non-empty numeric vector" msgstr "'x' muss ein nicht-leerer, numerischer Vektor sein" msgid "'x' contains missing or infinite values" msgstr "'x' enthält fehlende oder unendliche Werte" msgid "'densfun' must be supplied as a function or name" msgstr "'densfun' muss als eine Funktion oder Name angegeben werden" msgid "unsupported distribution" msgstr "nicht unterstützte Verteilung" msgid "supplying pars for the %s distribution is not supported" msgstr "Angabe von Parametern wird für die %s-Verteilung nicht unterstützt" msgid "need positive values to fit a log-Normal" msgstr "" "es werden positive Werte benötigt, um an eine logarithmische " "Normalverteilung anzunähern" msgid "Exponential values must be >= 0" msgstr "Exponential Wert müssen > 0 sein" msgid "Weibull values must be > 0" msgstr "Weibull Werte müssen > 0 sein" msgid "gamma values must be >= 0" msgstr "gamma Werte 
müssen > 0 sein" msgid "'start' must be a named list" msgstr "'start' muss eine benannte Liste sein" msgid "'start' specifies names which are not arguments to 'densfun'" msgstr "'start' gibt Namen an, die keine Argumente für 'densfun' sind" msgid "optimization failed" msgstr "Optimierung fehlgeschlagen" msgid "only 'REML = FALSE' is implemented" msgstr "nur 'REML = FALSE' ist implementiert" msgid "Initial estimate: %s" msgstr "Anfängliche Schätzung: %s" msgid "Iter. %d Alpha: %s" msgstr "Iter. %d Alpha: %s" msgid "iteration limit reached" msgstr "Iterationsgrenze erreicht" msgid "package 'nlme' is essential" msgstr "Paket 'nlme' ist erforderlich" msgid "'family' not recognized" msgstr "'family' nicht erkannt" msgid "iteration %d" msgstr "Iteration %d" msgid "'anova' is not available for PQL fits" msgstr "'anova' ist nicht für PQL-Näherungen verfügbar" msgid "cannot estimate scale: MAD is zero for this sample" msgstr "Skala kann nicht geschätzt werden: MAD ist in dieser Stichprobe Null" msgid "an initial configuration must be supplied with NA/Infs in 'd'" msgstr "eine anfängliche Einrichtung muss mit NA/Infs in 'd' geliefert werden" msgid "'y' must be a matrix" msgstr "'y' muss eine Matrix sein" msgid "distances must be result of 'dist' or a square matrix" msgstr "" "Distanzen müssen das Ergebnis einer 'dist'- oder quadratischen Matrix sein" msgid "invalid size" msgstr "ungültige Größe" msgid "zero or negative distance between objects %d and %d" msgstr "Null oder negative Entfernung zwischen Objekten %d und %d" msgid "not enough non-missing data" msgstr "nicht genug nicht-fehlende Daten" msgid "invalid initial configuration" msgstr "ungültige Anfangskonfiguration" msgid "initial configuration must be complete" msgstr "Anfangskonfiguration muss vollständig sein" msgid "invalid row(x)" msgstr "ungültige row(x)" msgid "invalid length(d)" msgstr "ungültige length(d)" msgid "data vectors must be the same length" msgstr "Datenvektoren müssen die gleiche Länge haben" msgid 
"missing or infinite values in the data are not allowed" msgstr "fehlende oder unendliche Werte in den Daten sind nicht erlaubt" msgid "only finite values are allowed in 'lims'" msgstr "nur endliche Werte sind in 'lims' erlaubt" msgid "bandwidths must be strictly positive" msgstr "Bandbreiten müssen strikt positiv sein" msgid "'x' is not a matrix" msgstr "'x' ist keine Matrix" msgid "infinite, NA or NaN values in 'x'" msgstr "unendlich, NA- oder NaN-Werte in 'x'" msgid "nrow(x) and length(grouping) are different" msgstr "nrow(x) und length(grouping) sind unterschiedlich" msgid "invalid 'prior'" msgstr "ungültiger 'prior'" msgid "'prior' is of incorrect length" msgstr "'prior' hat fehlerhafte Länge" msgid "cannot use leave-one-out CV with method %s" msgstr "" "leave-one-out-Kreuzvalidierung kann nicht bei Methode %s angewandt werden" msgid "rank = 0: variables are numerically constant" msgstr "rank = 0: Variablen sind numerisch konstant" msgid "variables are collinear" msgstr "Variablen sind kollinear" msgid "'nu' must exceed 2" msgstr "'nu' muss 2 überschreiten" msgid "group means are numerically identical" msgstr "Gruppenmittelwerte sind numerisch identisch" msgid "object not of class \"lda\"" msgstr "Objekt nicht aus der Klasse \"lda\"" msgid "wrong number of variables" msgstr "falsche Variablenanzahl" msgid "variable names in 'newdata' do not match those in 'object'" msgstr "Variablennamen in 'newdata' entsprechen nicht denen in 'Objekt'" msgid "'breaks' must be strictly increasing" msgstr "'breaks' muss strikt ansteigend sein" msgid "'breaks' do not cover the data" msgstr "'breaks' deckt die Daten nicht ab" msgid "dim(W) is not correct" msgstr "dim(W) ist nicht richtig" msgid "'W' is not positive definite" msgstr "'W' ist nicht positiv definiert" msgid "'data' has no 'terms' attribute" msgstr "'data' hat kein 'terms'-Attribut" msgid "formula specifies no response" msgstr "Formel gibt keine Rückmeldung an" msgid "'object' has no 'call' component. 
Updating not possible" msgstr "'object' hat keine 'call'-Komponente. Aktualisierung nicht möglich" msgid "Response variable must be positive after additions" msgstr "Rückmeldungsvariable muss nach Additionen positiv sein" msgid "missing values are not allowed" msgstr "fehlende Werte sind nicht erlaubt" msgid "'x' and 'y' must have the same number of rows" msgstr "'x' und 'y' müssen die gleiche Anzahl von Zeilen haben" msgid "'quantile' must be at most %d" msgstr "'quantile' darf höchstens %d sein" msgid "'ps' must be at least 'p'" msgstr "'ps' muss mindestens 'p' sein" msgid "'lqs' failed: all the samples were singular" msgstr "'lqs' fehlgeschlagen: Alle Stichproben waren singulär" msgid "missing or infinite values are not allowed" msgstr "fehlende oder unendliche Werte sind nicht erlaubt" msgid "at least %d cases are needed" msgstr "mindestens %d Fälle werden benötigt" msgid "'quantile' must be at least %d" msgstr "'quantile' muss mindestens %d sein" msgid "at least one column has IQR 0" msgstr "mindestens eine Spalte hat IQR 0" msgid "'x' is probably collinear" msgstr "'x' ist wahrscheinlich kollinear" msgid "all variables must be factors" msgstr "alle Variablen müssen Faktoren sein" msgid "factors in 'newdata' do not match those for 'object'" msgstr "Faktoren in 'newdata' entsprechen nicht denen für 'Object'" msgid "'newdata' is not of the right length" msgstr "'newdata' hat nicht die richtige Länge" msgid "'X' must be a numeric or complex matrix" msgstr "'X' muss eine numerische oder komplexe Matrix sein" msgid "incompatible arguments" msgstr "inkompatible Argumente" msgid "'Sigma' is not positive definite" msgstr "'Sigma' ist nicht positiv definiert" msgid "'theta' must be given" msgstr "'theta' muss angegeben sein" msgid "negative values not allowed for the negative binomial family" msgstr "keine negativen Werte für negative binomische Familie erlaubt" msgid "tests made without re-estimating 'theta'" msgstr "Tests ohne Neuabschätzung von 'theta' durchgeführt" 
msgid "only Chi-squared LR tests are implemented" msgstr "nur LR-Tests der quadratische Chi-Verteilung sind implementiert" msgid "not all objects are of class \"negbin\"" msgstr "nicht alle Objekte gehören der Klasse \"negbin\" an" msgid "unimplemented method: %s" msgstr "nicht implementierte Methode: %s" msgid "Initial fit:" msgstr "Anfangsanpassung:" msgid "Initial value for 'theta': %f" msgstr "Anfänglicher Wert für 'theta': %f" msgid "alternation limit reached" msgstr "Grenze der Alternierungen erreicht" msgid "'theta' must be specified" msgstr "'theta' muss angegeben werden" msgid "" "\"%s\" link not available for negative binomial family; available links are " "\"identity\", \"log\" and \"sqrt\"" msgstr "" "Link \"%s\" nicht für negative binomische Familie verfügbar. Verfügbare " "Linkfunktionen sind \"identity\", \"log\" und \"sqrt\"" msgid "estimate truncated at zero" msgstr "Schätzung wurde auf Null abgeschnitten" msgid "theta.ml: iter" msgstr "theta.ml: iter" msgid "theta =" msgstr "theta =" msgid "extra arguments discarded" msgstr "zusätzliche Argumente verworfen" msgid "at least 3 distinct 'x' values are needed" msgstr "mindestens 3 verschiedene 'x'-Werte sind nötig" msgid "an intercept is needed and assumed" msgstr "ein Schnittpunkt wird benötigt und vorausgesetzt" msgid "response must be a factor" msgstr "Rückmeldung muss ein Faktor sein" msgid "response must have 3 or more levels" msgstr "Rückmeldung muss 3 oder mehr Stufen haben" msgid "attempt to find suitable starting values failed" msgstr "Versuch, geeignete Anfangswerte zu finden, fehlgeschlagen" msgid "design appears to be rank-deficient, so dropping some coefs" msgstr "" "Entwurf scheint Rang-defizitär zu sein, deshalb werden einige Koeffizienten " "fallen gelassen" msgid "'start' is not of the correct length" msgstr "'start' hat nicht die richtige Länge" msgid "Re-fitting to get Hessian" msgstr "Neuanpassung um Hesse-Matrix zu bestimmen" msgid "not a \"polr\" object" msgstr "kein 
\"polr\"-Objekt" msgid "anova is not implemented for a single \"polr\" object" msgstr "" "Varianzanalyse ist nicht für ein einzelnes \"polr\"-Objekt implementiert" msgid "not all objects are of class \"polr\"" msgstr "nicht alle Objekte gehören zur Klasse \"polr\"" msgid "models were not all fitted to the same size of dataset" msgstr "" "nicht alle Modelle wurden an die gleiche Größe, wie die des Datensatzes, " "angepasst" msgid "Parameter:" msgstr "Parameter:" msgid "down" msgstr "ab" msgid "up" msgstr "auf" msgid "" "profiling has found a better solution, so original fit had not converged" msgstr "" "Profilieren hat eine bessere Lösung gefunden, deshalb konvergierte die " "urspüngliche Anpassung nicht" msgid "weighted fits are not supported" msgstr "gewichtete Anpassungen werden nicht unterstützt" msgid "some group is too small for 'qda'" msgstr "irgendeine Gruppe ist für 'qda' zu klein" msgid "rank deficiency in group %s" msgstr "Rang-Defizit in Gruppe %s" msgid "object not of class \"qda\"" msgstr "Objekt nicht aus der Klasse \"qda\"" msgid "cannot have leave-one-out CV with 'newdata'" msgstr "kann mit 'newdata' keine leave-one-out Kreuzvalidierung durchführen" msgid "'x' is singular: singular fits are not implemented in 'rlm'" msgstr "" "'x' ist singulär: singuläre Anpassungen sind in 'rlm' nicht implementiert" msgid "invalid 'test.vec'" msgstr "'test.vec' ungültig" msgid "length of 'weights' must equal number of observations" msgstr "Länge von 'weights' muss der Anzahl der Beobachtungen entsprechen" msgid "negative 'weights' value" msgstr "negativer 'weights'-Wert" msgid "some of ... do not match" msgstr "manches von ... 
passt nicht" msgid "'init' method is unknown" msgstr "'init'-Methode ist unbekannt" msgid "'c' must be at least 1.548 and has been ignored" msgstr "'c' muss mindestens 1.548 sein und wurde ignoriert" msgid "'method' is unknown" msgstr "'method' ist unbekannt" msgid "'rlm' failed to converge in %d steps" msgstr "'rlm' nicht in %d Schritten konvergiert" msgid "'coef' must define a contrast, i.e., sum to 0" msgstr "'coef' muss einen Kontrast definieren, d.h. Summe auf 0" msgid "'coef' must have same length as 'contrast.obj'" msgstr "'coef' muss die gleiche Länge wie 'contrast.obj' haben" msgid "each element of '%s' must be logical" msgstr "jedes Element von '%s' muss boolesch sein" msgid "the contrast defined is empty (has no TRUE elements)" msgstr "der definierte Kontrast ist leer (hat keine TRUE-Elemente)" msgid "columns of 'contrast.obj' must define a contrast (sum to zero)" msgstr "" "Spalten von 'contrast.obj' müssen einen Kontrast definieren (Summen auf 0)" msgid "\"gradient\" attribute missing" msgstr "\"gradient\"-Attribut fehlt" msgid "\"hessian\" attribute missing" msgstr "\"hessian\"-Attribut fehlt" msgid "regression apparently linear" msgstr "Regression anscheinend linear" msgid "Infs not allowed in 'd'" msgstr "Infs in 'd' nicht erlaubt" msgid "an initial configuration must be supplied if there are NAs in 'd'" msgstr "" "es muss eine Anfangskonfiguration geliefert werden, wenn NAs in 'd' sind" msgid "'use.start' cannot be used with R's version of 'glm'" msgstr "'use.start' kann nicht mit der R-Version von 'glm' benutzt werden" msgid "AIC is not defined for this model, so 'stepAIC' cannot proceed" msgstr "" "AIC ist für dieses Modell nicht definiert, deshalb kann 'stepAIC' nicht " "fortfahren" msgid "AIC is -infinity for this model, so 'stepAIC' cannot proceed" msgstr "" "AIC ist -Inf für dieses Modell, deshalb kann 'stepAIC' nicht fortfahren" msgid "0 df terms are changing AIC" msgstr "0 df-Bedinungen verändern AIC" msgid "AIC undefined for REML fit" 
msgstr "AIC für REML-Näherung undefiniert" msgid "'nbins' must result in a positive integer" msgstr "'nbins' muss als Ergebnis eine positive Ganzzahl liefern" msgid "'h' must be strictly positive" msgstr "'h' muss strikt positiv sein" msgid "uneven breaks with 'prob = FALSE' will give a misleading plot" msgstr "" "ungerade Unterbrechungen mit 'prob = FALSE' ergeben eine irreführende " "grafische Darstellung" msgid "'x' has length zero" msgstr "'x' hat Länge Null" msgid "no solution in the specified range of bandwidths" msgstr "keine Lösung im angegebenen Bereich der Bandbreiten" msgid "minimum occurred at one end of the range" msgstr "Minimum an einem Ende des Bereichs aufgetreten" msgid "using the %d/%d row from a combined fit" msgid_plural "using the %d/%d rows from a combined fit" msgstr[0] "%d/%d Zeile wird von einer kombinierten Anpassung benutzt." msgstr[1] "%d/%d Zeilen werden von einer kombinierten Anpassung benutzt." msgid "group %s is empty" msgid_plural "groups %s are empty" msgstr[0] "Gruppe %s ist leer" msgstr[1] "Gruppen %s sind leer" msgid "variable %s appears to be constant within groups" msgid_plural "variables %s appear to be constant within groups" msgstr[0] "Variable %s scheint innerhalb der Gruppen konstant zu sein" msgstr[1] "Variablen %s scheinen innerhalb der Gruppen konstant zu sein" msgid "only %d set, so all sets will be tried" msgid_plural "only %d sets, so all sets will be tried" msgstr[0] "nur %d Menge, daher werden alle Mengen getestet" msgstr[1] "nur %d Mengen, daher werden alle Mengen getestet" msgid "%d missing observation deleted" msgid_plural "%d missing observations deleted" msgstr[0] "%d fehlende Beobachtung gelöscht" msgstr[1] "%d fehlende Beobachtungen gelöscht" msgid "%d row with zero weights not counted" msgid_plural "%d rows with zero weights not counted" msgstr[0] "%d Zeile mit Gewicht Null nicht gezählt" msgstr[1] "%d Zeilen mit Gewicht Null nicht gezählt" #~ msgid "Theta(%d) = %f, 2(Ls - Lm) = %f" #~ msgstr "Theta(%d) = 
%f, 2(Ls - Lm) = %f" #~ msgid "theta.ml: iter %d 'theta = %f'" #~ msgstr "theta.ml: iter %d 'theta = %f'" MASS/po/R-MASS.pot0000644000176000001440000002370412312050630013147 0ustar ripleyusersmsgid "" msgstr "" "Project-Id-Version: MASS 7.3-30\n" "POT-Creation-Date: 2014-03-18 14:00\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=CHARSET\n" "Content-Transfer-Encoding: 8bit\n" msgid "no terms in scope" msgstr "" msgid "no terms in scope for adding to object" msgstr "" msgid "trying + %s" msgstr "" msgid "number of rows in use has changed: remove missing values?" msgstr "" msgid "F test assumes 'quasi%s' family" msgstr "" msgid "no 'addterm' method implemented for \"mlm\" models" msgstr "" msgid "scope is not a subset of term labels" msgstr "" msgid "trying - %s" msgstr "" msgid "'dropterm' not implemented for \"mlm\" fits" msgstr "" msgid "iteration limit reached near 'x = %f'" msgstr "" msgid "%s does not have both 'qr' and 'y' components" msgstr "" msgid "response variable must be positive" msgstr "" msgid "Waiting for profiling to be done..." msgstr "" msgid "invalid number of levels" msgstr "" msgid "frequency table is %d-dimensional" msgstr "" msgid "invalid table specification" msgstr "" msgid "higher-way table requested. 
Only 2-way allowed" msgstr "" msgid "negative or non-integer entries in table" msgstr "" msgid "all frequencies are zero" msgstr "" msgid "empty row or column in table" msgstr "" msgid "biplot is only possible if nf >= 2" msgstr "" msgid "missing or infinite values in 'x'" msgstr "" msgid "length of 'wt' must equal number of observations" msgstr "" msgid "negative weights not allowed" msgstr "" msgid "no positive weights" msgstr "" msgid "'center' is not the right length" msgstr "" msgid "Probable convergence failure" msgstr "" msgid "'uin' is too large to fit plot in" msgstr "" msgid "'x' must be a non-empty numeric vector" msgstr "" msgid "'x' contains missing or infinite values" msgstr "" msgid "'densfun' must be supplied as a function or name" msgstr "" msgid "unsupported distribution" msgstr "" msgid "supplying pars for the %s distribution is not supported" msgstr "" msgid "need positive values to fit a log-Normal" msgstr "" msgid "Exponential values must be >= 0" msgstr "" msgid "Weibull values must be > 0" msgstr "" msgid "gamma values must be >= 0" msgstr "" msgid "'start' must be a named list" msgstr "" msgid "'start' specifies names which are not arguments to 'densfun'" msgstr "" msgid "optimization failed" msgstr "" msgid "only 'REML = FALSE' is implemented" msgstr "" msgid "Initial estimate: %s" msgstr "" msgid "Iter. 
%d Alpha: %s" msgstr "" msgid "iteration limit reached" msgstr "" msgid "package 'nlme' is essential" msgstr "" msgid "'family' not recognized" msgstr "" msgid "iteration %d" msgstr "" msgid "'anova' is not available for PQL fits" msgstr "" msgid "cannot estimate scale: MAD is zero for this sample" msgstr "" msgid "an initial configuration must be supplied with NA/Infs in 'd'" msgstr "" msgid "'y' must be a matrix" msgstr "" msgid "distances must be result of 'dist' or a square matrix" msgstr "" msgid "invalid size" msgstr "" msgid "zero or negative distance between objects %d and %d" msgstr "" msgid "not enough non-missing data" msgstr "" msgid "invalid initial configuration" msgstr "" msgid "initial configuration must be complete" msgstr "" msgid "invalid row(x)" msgstr "" msgid "invalid length(d)" msgstr "" msgid "data vectors must be the same length" msgstr "" msgid "missing or infinite values in the data are not allowed" msgstr "" msgid "only finite values are allowed in 'lims'" msgstr "" msgid "bandwidths must be strictly positive" msgstr "" msgid "'x' is not a matrix" msgstr "" msgid "infinite, NA or NaN values in 'x'" msgstr "" msgid "nrow(x) and length(grouping) are different" msgstr "" msgid "invalid 'prior'" msgstr "" msgid "'prior' is of incorrect length" msgstr "" msgid "cannot use leave-one-out CV with method %s" msgstr "" msgid "rank = 0: variables are numerically constant" msgstr "" msgid "variables are collinear" msgstr "" msgid "'nu' must exceed 2" msgstr "" msgid "group means are numerically identical" msgstr "" msgid "object not of class \"lda\"" msgstr "" msgid "wrong number of variables" msgstr "" msgid "variable names in 'newdata' do not match those in 'object'" msgstr "" msgid "'breaks' must be strictly increasing" msgstr "" msgid "'breaks' do not cover the data" msgstr "" msgid "dim(W) is not correct" msgstr "" msgid "'W' is not positive definite" msgstr "" msgid "'data' has no 'terms' attribute" msgstr "" msgid "formula specifies no 
response" msgstr "" msgid "'object' has no 'call' component. Updating not possible" msgstr "" msgid "Response variable must be positive after additions" msgstr "" msgid "missing values are not allowed" msgstr "" msgid "'x' and 'y' must have the same number of rows" msgstr "" msgid "'quantile' must be at most %d" msgstr "" msgid "'ps' must be at least 'p'" msgstr "" msgid "'lqs' failed: all the samples were singular" msgstr "" msgid "missing or infinite values are not allowed" msgstr "" msgid "at least %d cases are needed" msgstr "" msgid "'quantile' must be at least %d" msgstr "" msgid "at least one column has IQR 0" msgstr "" msgid "'x' is probably collinear" msgstr "" msgid "all variables must be factors" msgstr "" msgid "factors in 'newdata' do not match those for 'object'" msgstr "" msgid "'newdata' is not of the right length" msgstr "" msgid "'X' must be a numeric or complex matrix" msgstr "" msgid "incompatible arguments" msgstr "" msgid "'Sigma' is not positive definite" msgstr "" msgid "'theta' must be given" msgstr "" msgid "negative values not allowed for the negative binomial family" msgstr "" msgid "tests made without re-estimating 'theta'" msgstr "" msgid "only Chi-squared LR tests are implemented" msgstr "" msgid "not all objects are of class \"negbin\"" msgstr "" msgid "unimplemented method: %s" msgstr "" msgid "Initial fit:" msgstr "" msgid "Initial value for 'theta': %f" msgstr "" msgid "alternation limit reached" msgstr "" msgid "'theta' must be specified" msgstr "" msgid "\"%s\" link not available for negative binomial family; available links are \"identity\", \"log\" and \"sqrt\"" msgstr "" msgid "estimate truncated at zero" msgstr "" msgid "theta.ml: iter" msgstr "" msgid "theta =" msgstr "" msgid "extra arguments discarded" msgstr "" msgid "at least 3 distinct 'x' values are needed" msgstr "" msgid "an intercept is needed and assumed" msgstr "" msgid "response must be a factor" msgstr "" msgid "response must have 3 or more levels" msgstr "" 
msgid "attempt to find suitable starting values failed" msgstr "" msgid "design appears to be rank-deficient, so dropping some coefs" msgstr "" msgid "'start' is not of the correct length" msgstr "" msgid "Re-fitting to get Hessian" msgstr "" msgid "not a \"polr\" object" msgstr "" msgid "anova is not implemented for a single \"polr\" object" msgstr "" msgid "not all objects are of class \"polr\"" msgstr "" msgid "models were not all fitted to the same size of dataset" msgstr "" msgid "Parameter:" msgstr "" msgid "down" msgstr "" msgid "up" msgstr "" msgid "profiling has found a better solution, so original fit had not converged" msgstr "" msgid "weighted fits are not supported" msgstr "" msgid "some group is too small for 'qda'" msgstr "" msgid "rank deficiency in group %s" msgstr "" msgid "object not of class \"qda\"" msgstr "" msgid "cannot have leave-one-out CV with 'newdata'" msgstr "" msgid "'x' is singular: singular fits are not implemented in 'rlm'" msgstr "" msgid "invalid 'test.vec'" msgstr "" msgid "length of 'weights' must equal number of observations" msgstr "" msgid "negative 'weights' value" msgstr "" msgid "some of ... 
do not match" msgstr "" msgid "'init' method is unknown" msgstr "" msgid "'c' must be at least 1.548 and has been ignored" msgstr "" msgid "'method' is unknown" msgstr "" msgid "'rlm' failed to converge in %d steps" msgstr "" msgid "'coef' must define a contrast, i.e., sum to 0" msgstr "" msgid "'coef' must have same length as 'contrast.obj'" msgstr "" msgid "each element of '%s' must be logical" msgstr "" msgid "the contrast defined is empty (has no TRUE elements)" msgstr "" msgid "columns of 'contrast.obj' must define a contrast (sum to zero)" msgstr "" msgid "\"gradient\" attribute missing" msgstr "" msgid "\"hessian\" attribute missing" msgstr "" msgid "regression apparently linear" msgstr "" msgid "Infs not allowed in 'd'" msgstr "" msgid "an initial configuration must be supplied if there are NAs in 'd'" msgstr "" msgid "'use.start' cannot be used with R's version of 'glm'" msgstr "" msgid "AIC is not defined for this model, so 'stepAIC' cannot proceed" msgstr "" msgid "AIC is -infinity for this model, so 'stepAIC' cannot proceed" msgstr "" msgid "0 df terms are changing AIC" msgstr "" msgid "AIC undefined for REML fit" msgstr "" msgid "'nbins' must result in a positive integer" msgstr "" msgid "'h' must be strictly positive" msgstr "" msgid "uneven breaks with 'prob = FALSE' will give a misleading plot" msgstr "" msgid "'x' has length zero" msgstr "" msgid "no solution in the specified range of bandwidths" msgstr "" msgid "minimum occurred at one end of the range" msgstr "" msgid "using the %d/%d row from a combined fit" msgid_plural "using the %d/%d rows from a combined fit" msgstr[0] "" msgstr[1] "" msgid "group %s is empty" msgid_plural "groups %s are empty" msgstr[0] "" msgstr[1] "" msgid "variable %s appears to be constant within groups" msgid_plural "variables %s appear to be constant within groups" msgstr[0] "" msgstr[1] "" msgid "only %d set, so all sets will be tried" msgid_plural "only %d sets, so all sets will be tried" msgstr[0] "" msgstr[1] "" 
msgid "%d missing observation deleted" msgid_plural "%d missing observations deleted" msgstr[0] "" msgstr[1] "" msgid "%d row with zero weights not counted" msgid_plural "%d rows with zero weights not counted" msgstr[0] "" msgstr[1] "" MASS/po/R-pl.po0000644000176000001440000013674412315115517012655 0ustar ripleyusersmsgid "" msgstr "" "Project-Id-Version: MASS 7.3-30\n" "Report-Msgid-Bugs-To: bugs@R-project.org\n" "POT-Creation-Date: 2013-03-18 09:49\n" "PO-Revision-Date: 2014-03-24 17:44+0100\n" "Last-Translator: Åukasz Daniel \n" "Language-Team: Åukasz Daniel \n" "Language: pl_PL\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 " "|| n%100>=20) ? 1 : 2);\n" "X-Poedit-SourceCharset: iso-8859-1\n" "X-Generator: Poedit 1.5.4\n" # MASS/R/add.R: 38 # stop("no terms in scope") # MASS/R/add.R: 108 # stop("no terms in scope") # MASS/R/add.R: 162 # stop("no terms in scope") msgid "no terms in scope" msgstr "brak czÅ‚onów w zakresie" # MASS/R/add.R: 42 # stop("no terms in scope for adding to object") # MASS/R/add.R: 166 # stop("no terms in scope for adding to object") msgid "no terms in scope for adding to object" msgstr "brak czÅ‚onów w zakresie do dodania do obiektu" # MASS/R/add.R: 56 # message(gettextf("trying + %s", tt), domain = "R-MASS") # MASS/R/add.R: 208 # message(gettextf("trying + %s", tt), domain = "R-MASS") msgid "trying + %s" msgstr "próbowanie + %s" # MASS/R/add.R: 65 # stop("number of rows in use has changed: remove missing values?") # MASS/R/add.R: 295 # stop("number of rows in use has changed: remove missing values?") # MASS/R/stepAIC.R: 182 # stop("number of rows in use has changed: remove missing values?") msgid "number of rows in use has changed: remove missing values?" msgstr "liczba wierszy w użyciu zmieniÅ‚a siÄ™: usunąć brakujÄ…ce wartoÅ›ci?" 
# MASS/R/add.R: 246 # warning(gettextf("F test assumes 'quasi%s' family", fam), domain = "R-MASS") # MASS/R/add.R: 430 # warning(gettextf("F test assumes 'quasi%s' family", fam), domain = "R-MASS") msgid "F test assumes 'quasi%s' family" msgstr "test Fishera zakÅ‚ada rodzinÄ™ 'kwazi-%s'" # MASS/R/add.R: 261 # stop("no 'addterm' method implemented for \"mlm\" models") msgid "no 'addterm' method implemented for \"mlm\" models" msgstr "brak zaimplementowanej metody 'addterm' dla modeli \"mlm\"" # MASS/R/add.R: 275 # stop("scope is not a subset of term labels") # MASS/R/add.R: 373 # stop("scope is not a subset of term labels") msgid "scope is not a subset of term labels" msgstr "zakres nie jest podzbiorem etykiet czÅ‚onów" # MASS/R/add.R: 286 # message(gettextf("trying - %s", tt), domain = "R-MASS") # MASS/R/add.R: 390 # message(gettextf("trying - %s", scope[i]), domain = "R-MASS") msgid "trying - %s" msgstr "próbowanie - %s" # MASS/R/add.R: 358 # stop("'dropterm' not implemented for \"mlm\" fits") msgid "'dropterm' not implemented for \"mlm\" fits" msgstr "'dropterm' nie jest zaimplementowane dla dopasowaÅ„ \"mlm\"" # MASS/R/area.R: 29 # warning(gettextf("iteration limit reached near 'x = %f'", d), # doman = NA) msgid "iteration limit reached near 'x = %f'" msgstr "osiÄ…gniÄ™to limit iteracji w pobliżu 'x = %f'" # MASS/R/boxcox.R: 50 # stop(gettextf("%s does not have both 'qr' and 'y' components", sQuote(deparse(substitute(object)))), domain = "R-MASS") # MASS/R/logtrans.R: 25 # stop(gettextf("%s does not have both 'qr' and 'y' components", sQuote(deparse(substitute(object)))), domain = "R-MASS") msgid "%s does not have both 'qr' and 'y' components" msgstr "%s nie posiada obydwu komponentów 'qr' oraz 'y'" # MASS/R/boxcox.R: 52 # stop("response variable must be positive") msgid "response variable must be positive" msgstr "zmienna zależna musi być dodatnia" # MASS/R/polr.R: 451 # message("Waiting for profiling to be done...") # MASS/R/confint.R: 23 # message("Waiting 
for profiling to be done...") # MASS/R/confint.R: 60 # message("Waiting for profiling to be done...") msgid "Waiting for profiling to be done..." msgstr "Oczekiwanie na wykonanie profilowania..." # MASS/R/contr.sdif.R: 21 # stop("invalid number of levels") # MASS/R/contr.sdif.R: 26 # stop("invalid number of levels") msgid "invalid number of levels" msgstr "niepoprawna liczba poziomów" # MASS/R/corresp.R: 22 # stop(gettextf("frequency table is %d-dimensional", m), domain = "R-MASS") msgid "frequency table is %d-dimensional" msgstr "tabela czÄ™stotliwoÅ›ci jest %d-wymiarowa" # MASS/R/corresp.R: 30 # stop("invalid table specification") msgid "invalid table specification" msgstr "niepoprawne okreÅ›lenie tabeli" # MASS/R/corresp.R: 39 # stop("higher-way table requested. Only 2-way allowed") msgid "higher-way table requested. Only 2-way allowed" msgstr "" "zażądano tabeli o wyższej kierunkowoÅ›ci. Tylko 2-kierunkowa jest dozwolona" # MASS/R/corresp.R: 48 # warning("negative or non-integer entries in table") msgid "negative or non-integer entries in table" msgstr "ujemne lub niecaÅ‚kowite wpisy w tabeli" # MASS/R/corresp.R: 49 # stop("all frequencies are zero") msgid "all frequencies are zero" msgstr "wszystkie czÄ™stotliwoÅ›ci wynoszÄ… zero" # MASS/R/corresp.R: 52 # stop("empty row or column in table") msgid "empty row or column in table" msgstr "pusty wiersz lub kolumna w tablicy" # MASS/R/corresp.R: 108 # stop("biplot is only possible if nf >= 2") msgid "biplot is only possible if nf >= 2" msgstr "biplot jest możliwy jeÅ›li 'nf >= 2'" # MASS/R/cov.trob.R: 23 # stop("missing or infinite values in 'x'") msgid "missing or infinite values in 'x'" msgstr "brakujÄ…ce lub nieskoÅ„czone wartoÅ›ci w 'x'" # MASS/R/cov.trob.R: 36 # stop("length of 'wt' must equal number of observations") msgid "length of 'wt' must equal number of observations" msgstr "dÅ‚ugość 'wt' musi równać siÄ™ liczbie obserwacji" # MASS/R/cov.trob.R: 37 # stop("negative weights not allowed") # 
MASS/R/negbin.R: 107 # stop("negative weights not allowed") msgid "negative weights not allowed" msgstr "ujemne wagi nie sÄ… dozwolone" # MASS/R/cov.trob.R: 38 # stop("no positive weights") msgid "no positive weights" msgstr "brak dodatnich wag" # MASS/R/cov.trob.R: 43 # stop("'center' is not the right length") msgid "'center' is not the right length" msgstr "argument 'center' nie posiada poprawnej dÅ‚ugoÅ›ci" # MASS/R/cov.trob.R: 63 # warning("Probable convergence failure") msgid "Probable convergence failure" msgstr "Prawdopodobne niepowodzenie zbieżnoÅ›ci" # MASS/R/eqscplot.R: 57 # stop("'uin' is too large to fit plot in") msgid "'uin' is too large to fit plot in" msgstr "argument 'uin' jest zbyt duży aby dopasować wykres" # MASS/R/fitdistr.R: 29 # stop("'x' must be a non-empty numeric vector") msgid "'x' must be a non-empty numeric vector" msgstr "argument 'x' musi być niepustym wektorem liczbowym" # MASS/R/fitdistr.R: 30 # stop("'x' contains missing or infinite values") msgid "'x' contains missing or infinite values" msgstr "argument 'x' zawiera brakujÄ…ce lub nieskoÅ„czone wartoÅ›ci" # MASS/R/fitdistr.R: 32 # stop("'densfun' must be supplied as a function or name") msgid "'densfun' must be supplied as a function or name" msgstr "argument 'densfun' musi być dostarczony jako funkcja lub nazwa" # MASS/R/fitdistr.R: 55 # stop("unsupported distribution") msgid "unsupported distribution" msgstr "niewspierany rozkÅ‚ad" # MASS/R/fitdistr.R: 90 # stop(gettext("supplying pars for the Poisson distribution is not supported"), domain = "R-MASS") msgid "supplying pars for the %s distribution is not supported" msgstr "dostarczanie parametrów dla rozkÅ‚adu %s nie jest wspierane" # MASS/R/fitdistr.R: 60 # stop("need positive values to fit a log-Normal") msgid "need positive values to fit a log-Normal" msgstr "" "potrzeba dodatnich wartoÅ›ci aby dopasować rozkÅ‚ad logarytmicznie normalny" # MASS/R/fitdistr.R: 101 # stop("Exponential values must be >= 0") msgid "Exponential 
values must be >= 0" msgstr "wartoÅ›ci eksponencjalne muszÄ… być >= 0" # MASS/R/fitdistr.R: 128 # stop("Weibull values must be > 0") msgid "Weibull values must be > 0" msgstr "wartoÅ›ci Weibulla muszÄ… być > 0" # MASS/R/fitdistr.R: 136 # stop("gamma values must be >= 0") msgid "gamma values must be >= 0" msgstr "wartoÅ›ci 'gamma' muszÄ… być >= 0" # MASS/R/fitdistr.R: 158 # stop("'start' must be a named list") msgid "'start' must be a named list" msgstr "'start' musi być nazwanÄ… listÄ…" # MASS/R/fitdistr.R: 165 # stop("'start' specifies names which are not arguments to 'densfun'") msgid "'start' specifies names which are not arguments to 'densfun'" msgstr "" "'start' okreÅ›la nazwy, które nie sÄ… argumentami przekazywanymi do 'densfun'" # MASS/R/fitdistr.R: 186 # stop("optimization failed") msgid "optimization failed" msgstr "optymalizacja nie powiodÅ‚a siÄ™" # MASS/R/fitdistr.R: 197 # stop("only 'REML = FALSE' is implemented") msgid "only 'REML = FALSE' is implemented" msgstr "tylko 'REML = FALSE' jest zaimplementowane" # MASS/R/gamma.shape.R: 31 # message(gettextf("Initial estimate: %s", format(alpha)), domain = "R-MASS") msgid "Initial estimate: %s" msgstr "PoczÄ…tkowe oszacowanie: %s" # MASS/R/gamma.shape.R: 42 # message(gettextf("Iter. %d Alpha: %s", itr, format(alpha)), domain = "R-MASS") msgid "Iter. 
%d Alpha: %s" msgstr "Iteracja %d Alpha: %s" # MASS/R/gamma.shape.R: 46 # warning("iteration limit reached") # MASS/R/negbin.R: 358 # warning("iteration limit reached") # MASS/R/negbin.R: 359 # gettext("iteration limit reached") msgid "iteration limit reached" msgstr "osiÄ…gniÄ™to graniczÄ… wartość iteracji" # MASS/R/glmmPQL.R: 20 # stop("package 'nlme' is essential") msgid "package 'nlme' is essential" msgstr "pakiet 'nlme' jest kluczowy" # MASS/R/glmmPQL.R: 26 # stop("'family' not recognized") msgid "'family' not recognized" msgstr "argument 'family' nie zostaÅ‚ rozpoznany" # MASS/R/glmmPQL.R: 81 # message(gettextf("iteration %d", i), domain = "R-MASS") msgid "iteration %d" msgstr "iteracja %d" # MASS/R/glmmPQL.R: 139 # stop("'anova' is not available for PQL fits") msgid "'anova' is not available for PQL fits" msgstr "funkcja 'anova()' nie jest dostÄ™pna dla dopasowaÅ„ PQL" # MASS/R/huber.R: 23 # stop("cannot estimate scale: MAD is zero for this sample") msgid "cannot estimate scale: MAD is zero for this sample" msgstr "nie można oszacować skali: MAD wynosi zero dla tej próby" # MASS/R/isoMDS.R: 21 # stop("an initial configuration must be supplied with NA/Infs in 'd'") msgid "an initial configuration must be supplied with NA/Infs in 'd'" msgstr "" "poczÄ…tkowa konfiguracja musi zostać dostarczona z wartosciami NA/Inf w 'd'" # MASS/R/isoMDS.R: 22 # stop("'y' must be a matrix") # MASS/R/sammon.R: 24 # stop("'y' must be a matrix") msgid "'y' must be a matrix" msgstr "argument 'y' musi być macierzÄ…" # MASS/R/isoMDS.R: 27 # stop("distances must be result of 'dist' or a square matrix") # MASS/R/sammon.R: 29 # stop("distances must be result of 'dist' or a square matrix") msgid "distances must be result of 'dist' or a square matrix" msgstr "odlegÅ‚oÅ›ci muszÄ… być wynikiem 'dist' lub być kwadratowÄ… macierzÄ…" # MASS/R/isoMDS.R: 36 # stop("invalid size") # MASS/R/sammon.R: 38 # stop("invalid size") msgid "invalid size" msgstr "niepoprawny rozmiar" # MASS/R/isoMDS.R: 42 
# stop(gettextf("zero or negative distance between objects %d and %d", aa[1,1], aa[1,2]), domain = "R-MASS") # MASS/R/sammon.R: 44 # stop(gettextf("zero or negative distance between objects %d and %d", aa[1,1], aa[1,2]), domain = "R-MASS") msgid "zero or negative distance between objects %d and %d" msgstr "zerowa lub ujemna odlegÅ‚ość pomiÄ™dzy obiektami %d oraz %d" # MASS/R/isoMDS.R: 46 # stop("not enough non-missing data") # MASS/R/sammon.R: 48 # stop("not enough non-missing data") msgid "not enough non-missing data" msgstr "niewystarczajÄ…ca liczba niebrakujÄ…cych danych" # MASS/R/isoMDS.R: 48 # stop("invalid initial configuration") # MASS/R/sammon.R: 50 # stop("invalid initial configuration") msgid "invalid initial configuration" msgstr "niepoprawna poczÄ…tkowa konfiguracja" # MASS/R/isoMDS.R: 49 # stop("initial configuration must be complete") # MASS/R/sammon.R: 51 # stop("initial configuration must be complete") msgid "initial configuration must be complete" msgstr "poczÄ…tkowa konfiguracja musi być kompletna" # MASS/R/isoMDS.R: 78 # stop("invalid row(x)") msgid "invalid row(x)" msgstr "niepoprawne 'row(x)'" # MASS/R/isoMDS.R: 84 # stop("invalid length(d)") msgid "invalid length(d)" msgstr "niepoprawne 'length(d)'" # MASS/R/kde2d.R: 21 # stop("data vectors must be the same length") msgid "data vectors must be the same length" msgstr "wektory danych muszÄ… być tej samej dÅ‚ugoÅ›ci" # MASS/R/kde2d.R: 23 # stop("missing or infinite values in the data are not allowed") msgid "missing or infinite values in the data are not allowed" msgstr "wartoÅ›ci brakujÄ…ce lub nieskoÅ„czone nie sÄ… dozwolone w zbiorze danych" # MASS/R/kde2d.R: 25 # stop("only finite values are allowed in 'lims'") msgid "only finite values are allowed in 'lims'" msgstr "tylko skoÅ„czone wartoÅ›ci sÄ… dozwolone w 'lims'" # MASS/R/kde2d.R: 32 # stop("bandwidths must be strictly positive") msgid "bandwidths must be strictly positive" msgstr "szerokoÅ›ci pasma muszÄ… być Å›ciÅ›cle dodatnie" # 
MASS/R/lda.R: 78 # stop("'x' is not a matrix") # MASS/R/qda.R: 78 # stop("'x' is not a matrix") msgid "'x' is not a matrix" msgstr "argument 'x' nie jest macierzÄ…" # MASS/R/lda.R: 81 # stop("infinite, NA or NaN values in 'x'") # MASS/R/qda.R: 81 # stop("infinite, NA or NaN values in 'x'") msgid "infinite, NA or NaN values in 'x'" msgstr "wartoÅ›ci nieskoÅ„czone, NA lub NaN w 'x'" # MASS/R/lda.R: 85 # stop("nrow(x) and length(grouping) are different") # MASS/R/qda.R: 85 # stop("nrow(x) and length(grouping) are different") msgid "nrow(x) and length(grouping) are different" msgstr "'nrow(x)' oraz 'length(grouping)' sÄ… różne" # MASS/R/isoMDS.R: 78 # stop("invalid row(x)") msgid "invalid 'prior'" msgstr "niepoprawny argument 'prior'" # MASS/R/lda.R: 91 # stop("'prior' is of incorrect length") # MASS/R/lda.R: 249 # stop("'prior' is of incorrect length") # MASS/R/qda.R: 95 # stop("'prior' is of incorrect length") # MASS/R/qda.R: 192 # stop("'prior' is of incorrect length") msgid "'prior' is of incorrect length" msgstr "argument 'prior' posiada niepoprawnÄ… dÅ‚ugość" # MASS/R/lda.R: 109 # stop(gettext("cannot use leave-one-out CV with method %s", sQuote(method)), domain = "R-MASS") # MASS/R/qda.R: 103 # stop(gettext("cannot use leave-one-out CV with method %s", sQuote(method)), domain = "R-MASS") # MASS/R/qda.R: 188 # stop(gettext("cannot use leave-one-out CV with method %s", sQuote(mt), domain = "R-MASS")) msgid "cannot use leave-one-out CV with method %s" msgstr "nie można użyć 'zostaw-jeden' CV z metodÄ… %s" # MASS/R/lda.R: 126 # stop("rank = 0: variables are numerically constant") # MASS/R/lda.R: 147 # stop("rank = 0: variables are numerically constant") # MASS/R/lda.R: 155 # stop("rank = 0: variables are numerically constant") msgid "rank = 0: variables are numerically constant" msgstr "rank = 0: zmienne sÄ… liczbowo staÅ‚e" # MASS/R/lda.R: 127 # warning("variables are collinear") # MASS/R/lda.R: 148 # warning("variables are collinear") # MASS/R/lda.R: 156 # 
warning("variables are collinear") msgid "variables are collinear" msgstr "zmienne sÄ… współliniowe" # MASS/R/lda.R: 131 # stop("'nu' must exceed 2") # MASS/R/qda.R: 112 # stop("'nu' must exceed 2") msgid "'nu' must exceed 2" msgstr "argument 'nu' musi przekraczać 2" # MASS/R/lda.R: 194 # stop("group means are numerically identical") msgid "group means are numerically identical" msgstr "Å›rednie grup sÄ… liczbowo identyczne" # MASS/R/lda.R: 213 # stop("object not of class \"lda\"") msgid "object not of class \"lda\"" msgstr "obiekt nie jest klasy \"lda\"" # MASS/R/lda.R: 242 # stop("wrong number of variables") # MASS/R/qda.R: 235 # stop("wrong number of variables") msgid "wrong number of variables" msgstr "niepoprawna liczba zmiennych" # MASS/R/lda.R: 245 # warning("variable names in 'newdata' do not match those in 'object'") # MASS/R/qda.R: 238 # warning("variable names in 'newdata' do not match those in 'object'") msgid "variable names in 'newdata' do not match those in 'object'" msgstr "nazwy zmiennych w 'newdata' nie zgadzajÄ… siÄ™ z tymi w 'object'" # MASS/R/lda.R: 375 # stop("'breaks' must be strictly increasing") # MASS/R/truehist.R: 55 # stop("'breaks' must be strictly increasing") msgid "'breaks' must be strictly increasing" msgstr "argument 'breaks' musi być Å›ciÅ›le rosnÄ…cy" # MASS/R/lda.R: 377 # stop("'breaks' do not cover the data") # MASS/R/truehist.R: 57 # stop("'breaks' do not cover the data") msgid "'breaks' do not cover the data" msgstr "argument 'breaks' nie pokrywa danych" # MASS/R/lm.gls.R: 33 # stop("dim(W) is not correct") msgid "dim(W) is not correct" msgstr "'dim(W)' nie jest poprawne" # MASS/R/lm.gls.R: 36 # stop("'W' is not positive definite") msgid "'W' is not positive definite" msgstr "argument 'W' nie jest dodatnio okreÅ›lony" # MASS/R/loglm.R: 82 # stop("'data' has no 'terms' attribute") # MASS/R/loglm.R: 120 # stop("'data' has no 'terms' attribute") msgid "'data' has no 'terms' attribute" msgstr "'data' nie posiada atrybutu 'terms'" 
# MASS/R/loglm.R: 83 # stop("formula specifies no response") msgid "formula specifies no response" msgstr "formuÅ‚a nie okreÅ›la zmiennej zależnej" # MASS/R/loglm.R: 267 # stop("'object' has no 'call' component. Updating not possible") msgid "'object' has no 'call' component. Updating not possible" msgstr "" "argument 'object' nie posiada komponentu 'call'. Aktualizacja nie jest " "możliwa" # MASS/R/logtrans.R: 29 # stop("Response variable must be positive after additions") msgid "Response variable must be positive after additions" msgstr "Zmienna zależna musi być dodatniÄ… po wykonanych uzupeÅ‚nieniach" # MASS/R/lqs.R: 66 # stop("missing values are not allowed") msgid "missing values are not allowed" msgstr "brakujÄ…ce wartoÅ›ci nie sÄ… dozwolone" # MASS/R/lqs.R: 75 # stop("'x' and 'y' must have the same number of rows") msgid "'x' and 'y' must have the same number of rows" msgstr "'x' oraz 'y' muszÄ… mieć tÄ™ samÄ… liczbÄ™ wierszy" # MASS/R/lqs.R: 92 # stop(gettextf("'quantile' must be at most %d", n-1), domain = "R-MASS") # MASS/R/lqs.R: 211 # stop(gettextf("'quantile' must be at most %d", n-1), domain = "R-MASS") msgid "'quantile' must be at most %d" msgstr "'quantile' musi być najwyżej %d" # MASS/R/lqs.R: 97 # warning("'ps' must be at least 'p'") msgid "'ps' must be at least 'p'" msgstr "'ps' musi być co najmniej 'p'" # MASS/R/lqs.R: 132 # stop("'lqs' failed: all the samples were singular", call.=FALSE) msgid "'lqs' failed: all the samples were singular" msgstr "'lqs' nie powiodÅ‚o siÄ™: wszystkie próby byÅ‚y osobliwe" # MASS/R/lqs.R: 201 # stop("missing or infinite values are not allowed") msgid "missing or infinite values are not allowed" msgstr "wartoÅ›ci brakujÄ…ce lub nieskoÅ„czone nie sÄ… dozwolone" # MASS/R/lqs.R: 204 # stop(gettextf("at least %d cases are needed", p+1), domain = "R-MASS") msgid "at least %d cases are needed" msgstr "co najmniej %d przypadków jest potrzebnych" # MASS/R/lqs.R: 209 # stop(gettextf("'quantile' must be at least %d", p+1), 
domain = "R-MASS") msgid "'quantile' must be at least %d" msgstr "'quantile' musi być co najmniej %d" # MASS/R/lqs.R: 214 # stop("at least one column has IQR 0") msgid "at least one column has IQR 0" msgstr "co najmniej jedna kolumna ma IQR równe 0" # MASS/R/lqs.R: 249 # stop("'x' is probably collinear") msgid "'x' is probably collinear" msgstr "argument 'x' jest najprawdopodobniej współliniowy" # MASS/R/mca.R: 28 # stop("all variables must be factors") # MASS/R/mca.R: 88 # stop("all variables must be factors") msgid "all variables must be factors" msgstr "wszystkie zmienne muszÄ… być czynnikami" # MASS/R/mca.R: 95 # stop("factors in 'newdata' do not match those for 'object'") msgid "factors in 'newdata' do not match those for 'object'" msgstr "czynniki w 'newdata' nie zgadzajÄ… siÄ™ z tymi dla 'object'" # MASS/R/cov.trob.R: 43 # stop("'center' is not the right length") msgid "'newdata' is not of the right length" msgstr "argument 'newdata' nie posiada poprawnej dÅ‚ugoÅ›ci" # MASS/R/misc.R: 36 # stop("'X' must be a numeric or complex matrix") msgid "'X' must be a numeric or complex matrix" msgstr "argument 'X' musi być macierzÄ… liczbowÄ… lub zespolonÄ…" # MASS/R/mvrnorm.R: 21 # stop("incompatible arguments") msgid "incompatible arguments" msgstr "niezgodne argumenty" # MASS/R/mvrnorm.R: 25 # stop("'Sigma' is not positive definite") msgid "'Sigma' is not positive definite" msgstr "argument 'Sigma' nie jest dodatnio okreÅ›lona" # MASS/man/MASS-internal.Rd: 21 # stop("'theta' must be given") # MASS/R/neg.bin.R: 17 # stop("'theta' must be given") msgid "'theta' must be given" msgstr "argument 'theta' musi być podany" # MASS/R/neg.bin.R: 38 # stop("negative values not allowed for the negative binomial family") # MASS/R/negbin.R: 246 # stop("negative values not allowed for the negative binomial family") msgid "negative values not allowed for the negative binomial family" msgstr "ujemne wartoÅ›ci nie sÄ… dozwolone dla rodziny rozkÅ‚adu Pascala" # MASS/R/negbin.R: 8 # 
warning("tests made without re-estimating 'theta'") msgid "tests made without re-estimating 'theta'" msgstr "testy wykonano bez ponownego oszacowania 'theta'" # MASS/R/negbin.R: 16 # warning("only Chi-squared LR tests are implemented") msgid "only Chi-squared LR tests are implemented" msgstr "jedynie testy Chi-kwadrat ilorazu wiarygodnoÅ›ci sÄ… zaimplementowane" # MASS/R/negbin.R: 23 # stop("not all objects are of class \"negbin\"") msgid "not all objects are of class \"negbin\"" msgstr "nie wszystkie obiekty sÄ… klasy \"negbin\"" # MASS/R/negbin.R: 115 # stop(gettextf("unimplemented method: %s", sQuote(method)), domain = "R-MASS") msgid "unimplemented method: %s" msgstr "niezaimplementowana metoda: %s" # MASS/R/negbin.R: 121 # message("Initial fit:") msgid "Initial fit:" msgstr "PoczÄ…tkowe dopasowanie:" # MASS/R/negbin.R: 134 # message(gettextf("Initial value for 'theta': %f", signif(th)), domain = "R-MASS") msgid "Initial value for 'theta': %f" msgstr "PoczÄ…tkowa wartość dla 'theta': %f" # MASS/R/negbin.R: 167 # warning("alternation limit reached") # MASS/R/negbin.R: 168 # gettext("alternation limit reached") msgid "alternation limit reached" msgstr "osiÄ…gniÄ™to limit zmian" # MASS/man/negative.binomial.Rd: 14 # stop("'theta' must be specified") # MASS/man/rnegbin.Rd: 14 # stop("'theta' must be specified") # MASS/R/negbin.R: 212 # stop("'theta' must be specified") # MASS/R/negbin.R: 267 # stop("'theta' must be specified") msgid "'theta' must be specified" msgstr "argument 'theta' musi być okreÅ›lony" # MASS/R/negbin.R: 227 # stop(gettextf("\"%s\" link not available for negative binomial family; available links are \"identity\", \"log\" and \"sqrt\"", linktemp)) msgid "" "\"%s\" link not available for negative binomial family; available links are " "\"identity\", \"log\" and \"sqrt\"" msgstr "" "połączenie \"%s\" nie jest dostÄ™pne dla rodziny rozkÅ‚adu Pascala; dostÄ™pne " "połączenia to \"identity\", \"log\" oraz \"sqrt\"" # MASS/R/negbin.R: 320 # warning("estimate 
truncated at zero") # MASS/R/negbin.R: 321 # gettext("estimate truncated at zero") # MASS/R/negbin.R: 354 # warning("estimate truncated at zero") # MASS/R/negbin.R: 355 # gettext("estimate truncated at zero") # MASS/R/negbin.R: 386 # warning("estimate truncated at zero") # MASS/R/negbin.R: 387 # gettext("estimate truncated at zero") msgid "estimate truncated at zero" msgstr "oszacowanie zostaÅ‚o przyciÄ™te w zerze" # MASS/R/negbin.R: 345 # message(sprintf("theta.ml: iter %d 'theta = %f'", it, signif(t0)), domain = "R-MASS") # MASS/R/negbin.R: 350 # message(gettextf("theta.ml: iter %d 'theta = %f'", it, signif(t0))) msgid "theta.ml: iter" msgstr "theta.ml: iteracja" msgid "theta =" msgstr "theta =" # MASS/R/negbin.R: 394 # warning("extra arguments discarded") msgid "extra arguments discarded" msgstr "dodatkowe argumenty zostaÅ‚y odrzucone" # MASS/R/negexp.R: 22 # stop("at least 3 distinct 'x' values are needed") msgid "at least 3 distinct 'x' values are needed" msgstr "co najmniej 3 różne wartoÅ›ci 'x' sÄ… potrzebne" # MASS/R/polr.R: 37 # warning("an intercept is needed and assumed") msgid "an intercept is needed and assumed" msgstr "przeciÄ™cie jest potrzebne oraz jest zakÅ‚adane" # MASS/R/polr.R: 43 # stop("response must be a factor") msgid "response must be a factor" msgstr "zmienna zależna musi być czynnikiem" # MASS/R/polr.R: 45 # stop("response must have 3 or more levels") # MASS/R/polr.R: 366 # stop("response must have 3 or more levels") msgid "response must have 3 or more levels" msgstr "zmienna zależna musi mieć 3 lub wiÄ™cej poziomów" # MASS/R/polr.R: 62 # stop("attempt to find suitable starting values failed") msgid "attempt to find suitable starting values failed" msgstr "próba znalezienia odpowiednich wartoÅ›ci startowych nie powiodÅ‚a siÄ™" # MASS/R/polr.R: 65 # warning("design appears to be rank-deficient, so dropping some coefs") msgid "design appears to be rank-deficient, so dropping some coefs" msgstr "" "projekt wydaje siÄ™ mieć deficyt rang, 
wiÄ™c usuwam niektóre współczynniki" # MASS/R/polr.R: 77 # stop("'start' is not of the correct length") msgid "'start' is not of the correct length" msgstr "argument 'start' nie posiada poprawnej dÅ‚ugoÅ›ci" # MASS/R/polr.R: 145 # message("\n", "Re-fitting to get Hessian", "\n", sep = "") msgid "Re-fitting to get Hessian" msgstr "Ponowne dopasowywanie aby uzyskać hesjan" # MASS/R/polr.R: 216 # stop("not a \"polr\" object") msgid "not a \"polr\" object" msgstr "to nie jest obiekt \"polr\"" # MASS/R/polr.R: 290 # stop("anova is not implemented for a single \"polr\" object") msgid "anova is not implemented for a single \"polr\" object" msgstr "" "funkcja 'anova()' nie jest zaimplementowana dla pojedynczego obiektu klasy " "\"polr\"" # MASS/R/polr.R: 297 # stop("not all objects are of class \"polr\"") msgid "not all objects are of class \"polr\"" msgstr "nie wszystkie obiekty sÄ… klasy \"polr\"" # MASS/R/polr.R: 300 # stop("models were not all fitted to the same size of dataset") msgid "models were not all fitted to the same size of dataset" msgstr "" "nie wszystkie modele zostaÅ‚y dopasowane do zbioru danych tego samego rozmiaru" msgid "Parameter:" msgstr "Parametr:" # MASS/R/polr.R: 414 # message("\nParameter:", pi, c("down", "up")[(sgn + 1)/2 + 1]) # MASS/R/profiles.R: 70 # message("\nParameter: ", pi, " ", # c("down", "up")[(sgn + 1)/2 + 1]) msgid "down" msgstr "down" # MASS/R/polr.R: 414 # message("\nParameter:", pi, c("down", "up")[(sgn + 1)/2 + 1]) # MASS/R/profiles.R: 70 # message("\nParameter: ", pi, " ", # c("down", "up")[(sgn + 1)/2 + 1]) msgid "up" msgstr "up" # MASS/R/polr.R: 432 # stop("profiling has found a better solution, so original fit had not converged") # MASS/R/profiles.R: 91 # stop("profiling has found a better solution, so original fit had not converged") msgid "" "profiling has found a better solution, so original fit had not converged" msgstr "" "profilowanie znalazÅ‚o lepsze rozwiÄ…zanie, tak wiÄ™c oryginalne dopasowanie " "nie uzbieżniÅ‚o 
siÄ™" # MASS/R/polr.R: 489 # stop("weighted fits are not supported") msgid "weighted fits are not supported" msgstr "ważone dopasowania nie sÄ… wspierane" # MASS/R/qda.R: 90 # stop("some group is too small for 'qda'") msgid "some group is too small for 'qda'" msgstr "niektóre grupy sÄ… zbyt maÅ‚e dla 'qda'" # MASS/R/qda.R: 127 # stop(gettextf("rank deficiency in group %s", lev[i]), domain = "R-MASS") # MASS/R/qda.R: 135 # stop(gettextf("rank deficiency in group %s", lev[i]), domain = "R-MASS") msgid "rank deficiency in group %s" msgstr "deficyt rang w grupie %s" # MASS/R/qda.R: 182 # stop("object not of class \"qda\"") msgid "object not of class \"qda\"" msgstr "obiekt nie jest klasy \"qda\"" # MASS/R/qda.R: 185 # stop("cannot have leave-one-out CV with 'newdata'") msgid "cannot have leave-one-out CV with 'newdata'" msgstr "nie można mieć 'zostaw-jeden' CV z 'newdata'" # MASS/R/rlm.R: 95 # stop("'x' is singular: singular fits are not implemented in 'rlm'") msgid "'x' is singular: singular fits are not implemented in 'rlm'" msgstr "" "'x' jest osobliwe: osobliwe dopasowania nie sÄ… zaimplementowane w 'rlm'" # MASS/R/rlm.R: 98 # stop("invalid 'test.vec'") msgid "invalid 'test.vec'" msgstr "niepoprawny argument 'test.vec'" # MASS/R/rlm.R: 104 # stop("length of 'weights' must equal number of observations") msgid "length of 'weights' must equal number of observations" msgstr "dÅ‚ugość 'weights' musi równać siÄ™ liczbie obserwacji" # MASS/R/rlm.R: 105 # stop("negative 'weights' value") msgid "negative 'weights' value" msgstr "ujemna wartość 'weights'" # MASS/R/rlm.R: 123 # warning("some of ... do not match") msgid "some of ... do not match" msgstr "niektóre z '...' 
nie pasujÄ…" # MASS/R/rlm.R: 132 # stop("'init' method is unknown") msgid "'init' method is unknown" msgstr "metoda 'init()' jest nieznana" # MASS/R/rlm.R: 151 # warning("'c' must be at least 1.548 and has been ignored") msgid "'c' must be at least 1.548 and has been ignored" msgstr "'c' musi być co najmniej 1.548; wartość zostaÅ‚a zignorowana" # MASS/R/rlm.R: 154 # stop("'method' is unknown") msgid "'method' is unknown" msgstr "'method' jest nieznane" # MASS/R/rlm.R: 191 # warning(gettextf("'rlm' failed to converge in %d steps", maxit), domain = "R-MASS") msgid "'rlm' failed to converge in %d steps" msgstr "'rlm' nie uzbieżniÅ‚ siÄ™ w %d krokach" # MASS/R/rlm.R: 385 # stop("'coef' must define a contrast, i.e., sum to 0") msgid "'coef' must define a contrast, i.e., sum to 0" msgstr "'coef' musi definiować kontrast, tzn. sumować siÄ™ do 0" # MASS/R/rlm.R: 387 # stop("'coef' must have same length as 'contrast.obj'") msgid "'coef' must have same length as 'contrast.obj'" msgstr "'coef' musi mieć tÄ™ samÄ… dÅ‚ugość jak 'contrast.obj'" # MASS/R/rlm.R: 393 # stop(gettextf("each element of '%s' must be logical", substitute(contrasts.list)), domain = "R-MASS") msgid "each element of '%s' must be logical" msgstr "każdy element '%s' musi być typem logicznym" # MASS/R/rlm.R: 397 # stop("the contrast defined is empty (has no TRUE elements)") msgid "the contrast defined is empty (has no TRUE elements)" msgstr "zdefiniowany kontrast jest pusty (nie zawiera elementów TRUE)" # MASS/R/rlm.R: 402 # stop("columns of 'contrast.obj' must define a contrast (sum to zero)") msgid "columns of 'contrast.obj' must define a contrast (sum to zero)" msgstr "kolumny 'contrast.obj' muszÄ… okreÅ›lać kontrast (sumować siÄ™ do zera)" # MASS/R/rms.curv.R: 22 # stop("\"gradient\" attribute missing") msgid "\"gradient\" attribute missing" msgstr "brakuje atrybutu \"gradient\"" # MASS/R/rms.curv.R: 24 # stop("\"hessian\" attribute missing") msgid "\"hessian\" attribute missing" msgstr "brakuje 
atrybutu \"hessian\"" # MASS/R/rms.curv.R: 34 # warning("regression apparently linear") msgid "regression apparently linear" msgstr "regresja jest najwyraźniej liniowa" # MASS/R/sammon.R: 21 # stop("Infs not allowed in 'd'") msgid "Infs not allowed in 'd'" msgstr "NieskoÅ„czonoÅ›ci nie sÄ… dozwolone w 'd'" # MASS/R/sammon.R: 23 # stop("an initial configuration must be supplied if there are NAs in 'd'") msgid "an initial configuration must be supplied if there are NAs in 'd'" msgstr "" "poczÄ…tkowa konfiguracja musi zostać dostarczona jeÅ›li sÄ… wartoÅ›ci NA w 'd'" # MASS/R/stepAIC.R: 75 # warning("'use.start' cannot be used with R's version of 'glm'") msgid "'use.start' cannot be used with R's version of 'glm'" msgstr "argument 'use.start' nie może być użyty z wersjÄ… R 'glm'" # MASS/R/stepAIC.R: 105 # stop("AIC is not defined for this model, so 'stepAIC' cannot proceed") msgid "AIC is not defined for this model, so 'stepAIC' cannot proceed" msgstr "" "AIC nie jest zdefiniowane dla tego modelu, wiÄ™c 'stepAIC' nie może " "kontynuować" # MASS/R/stepAIC.R: 107 # stop("AIC is -infinity for this model, so 'stepAIC' cannot proceed") msgid "AIC is -infinity for this model, so 'stepAIC' cannot proceed" msgstr "" "AIC wynosi minus nieskoÅ„czoność dla tego modelu, tak wiÄ™c 'stepAIC' nie może " "kontynuować" # MASS/R/stepAIC.R: 141 # warning("0 df terms are changing AIC") msgid "0 df terms are changing AIC" msgstr "czÅ‚ony o 0 stopniach swobody zmieniajÄ… AIC" # MASS/R/stepAIC.R: 212 # stop("AIC undefined for REML fit") # MASS/R/stepAIC.R: 220 # stop("AIC undefined for REML fit") msgid "AIC undefined for REML fit" msgstr "AIC nie jest zdefiniowane dla dopasowania REML" # MASS/R/truehist.R: 46 # stop("'nbins' must result in a positive integer") msgid "'nbins' must result in a positive integer" msgstr "argument 'nbins' musi być dodatniÄ… liczbÄ… caÅ‚kowitÄ…" # MASS/R/truehist.R: 50 # stop("'h' must be strictly positive") msgid "'h' must be strictly positive" msgstr "argument 'h' musi 
być Å›ciÅ›le dodatni" # MASS/R/truehist.R: 60 # warning("uneven breaks with 'prob = FALSE' will give a misleading plot") msgid "uneven breaks with 'prob = FALSE' will give a misleading plot" msgstr "nieparzyste przerwy z 'prob = FALSE' dadzÄ… mylny wykres" # MASS/R/ucv.R: 42 # stop("'x' has length zero") # MASS/R/ucv.R: 84 # stop("'x' has length zero") # MASS/R/ucv.R: 113 # stop("'x' has length zero") msgid "'x' has length zero" msgstr "argument 'x' posiada zerowÄ… dÅ‚ugość" # MASS/R/ucv.R: 64 # stop("no solution in the specified range of bandwidths") msgid "no solution in the specified range of bandwidths" msgstr "brak rozwiÄ…zania we wskazanym zakresie pasm" # MASS/R/ucv.R: 97 # warning("minimum occurred at one end of the range") # MASS/R/ucv.R: 127 # warning("minimum occurred at one end of the range") msgid "minimum occurred at one end of the range" msgstr "pojawiÅ‚a siÄ™ wartość minimum na jednym z koÅ„ców zakresu" # MASS/R/add.R: 188 # warning(sprintf(ngettext(newn, # "using the %d/%d row from a combined fit", # "using the %d/%d rows from a combined fit", domain = "R-MASS"), # newn, oldn), domain = NA) msgid "using the %d/%d row from a combined fit" msgid_plural "using the %d/%d rows from a combined fit" msgstr[0] "używanie %d/%d wiersza z połączonego dopasowania" msgstr[1] "używanie %d/%d wierszy z połączonego dopasowania" msgstr[2] "używanie %d/%d wierszy z połączonego dopasowania" # MASS/R/lda.R: 96 # warning(sprintf(ngettext(length(empty), # "group %s is empty", # "groups %s are empty", domain = "R-MASS"), # paste(empty, collapse = " ")), domain = NA) msgid "group %s is empty" msgid_plural "groups %s are empty" msgstr[0] "grupa %s jest pusta" msgstr[1] "grupy %s sÄ… puste" msgstr[2] "grupy %s sÄ… puste" # MASS/R/lda.R: 114 # stop(sprintf(ngettext(length(const), # "variable %s appears to be constant within groups", # "variables %s appear to be constant within groups", domain = "R-MASS"), # paste(const, collapse = " ")), domain = NA) msgid "variable %s 
appears to be constant within groups" msgid_plural "variables %s appear to be constant within groups" msgstr[0] "zmienna %s wyglÄ…da na stałą wewnÄ…trz grup" msgstr[1] "zmienne %s wyglÄ…dajÄ… na staÅ‚e wewnÄ…trz grup" msgstr[2] "zmienne %s wyglÄ…dajÄ… na staÅ‚e wewnÄ…trz grup" # MASS/R/lqs.R: 105 # warning(sprintf(ngettext(nexact, # "only %d set, so all sets will be tried", # "only %d sets, so all sets will be tried", domain = "R-MASS"), # nexact), domain = NA) # MASS/R/lqs.R: 222 # warning(sprintf(ngettext(nexact, # "only %d set, so all sets will be tried", # "only %d sets, so all sets will be tried", domain = "R-MASS"), # nexact), domain = NA) msgid "only %d set, so all sets will be tried" msgid_plural "only %d sets, so all sets will be tried" msgstr[0] "tylko %d zbiór, wiÄ™c wszystkie zbiory zostanÄ… sprawdzone" msgstr[1] "tylko %d zbiory, wiÄ™c wszystkie zbiory zostanÄ… sprawdzone" msgstr[2] "tylko %d zbiorów, wiÄ™c wszystkie zbiory zostanÄ… sprawdzone" # MASS/R/stdres.R: 25 # warning(sprintf(ngettext(n.miss, # "%d missing observation deleted", # "%d missing observations deleted", domain = "R-MASS"), # n.miss), domain = NA) msgid "%d missing observation deleted" msgid_plural "%d missing observations deleted" msgstr[0] "%d brakujÄ…ca obserwacja zostaÅ‚a usuniÄ™ta" msgstr[1] "%d brakujÄ…ce obserwacje zostaÅ‚y usuniÄ™te" msgstr[2] "%d brakujÄ…cych obserwacji zostaÅ‚o usuniÄ™tych" # MASS/R/stdres.R: 40 # warning(sprintf(ngettext(sum(excl), # "%d row with zero weights not counted", # "%d rows with zero weights not counted", domain = "R-MASS"), # sum(excl)), domain = NA) msgid "%d row with zero weights not counted" msgid_plural "%d rows with zero weights not counted" msgstr[0] "%d wiersz z zerowymi wagami nie zostaÅ‚ zliczony" msgstr[1] "%d wiersze z zerowymi wagami nie zostaÅ‚y zliczone" msgstr[2] "%d wierszy z zerowymi wagami nie zostaÅ‚o zliczonych" #~ msgid "Single term additions" #~ msgstr "Dodawania pojedynczych czÅ‚onów" # MASS/R/loglm.R: 207 # gettextf("Model 
%d:\n", i, domain = "R-MASS") #~ msgid "Model:" #~ msgstr "Model:" #~ msgid "scale:" #~ msgstr "skala:" #~ msgid "Single term deletions" #~ msgstr "Usuwania pojedynczych czÅ‚onów" #~ msgid "log-Likelihood" #~ msgstr "logarytm funkcji wiarygodnoÅ›ci" # MASS/R/corresp.R: 96 # gettext("First canonical correlation(s):", domain = "R-MASS") #~ msgid "First canonical correlation(s):" #~ msgstr "Pierwsze kanoniczne korelacje:" # MASS/R/corresp.R: 98 # gettextf("%s scores:", rcn[1L]) # MASS/R/corresp.R: 100 # gettextf("%s scores:", rcn[2L]) #~ msgid "%s scores:" #~ msgstr "punktacje %s:" # MASS/R/mca.R: 102 # stop("'newdata' is not of the right length") #~ msgid "'%s' argument is of the wrong length" #~ msgstr "argument '%s' ma niepoprawnÄ… dÅ‚ugość" # MASS/R/fitdistr.R: 58 # stop(gettext("supplying pars for the log-Normal distribution is not supported"), domain = "R-MASS") #~ msgid "supplying pars for the log-Normal distribution is not supported" #~ msgstr "" #~ "dostarczanie parametrów dla rozkÅ‚adu log-normalnego nie jest wspierane" # MASS/R/fitdistr.R: 76 # stop(gettext("supplying pars for the Normal distribution is not supported"), domain = "R-MASS") #~ msgid "supplying pars for the Normal distribution is not supported" #~ msgstr "dostarczanie parametrów dla rozkÅ‚adu normalnego nie jest wspierane" # MASS/R/fitdistr.R: 103 # stop(gettext("supplying pars for the exponential distribution is not supported"), domain = "R-MASS") #~ msgid "supplying pars for the exponential distribution is not supported" #~ msgstr "" #~ "dostarczanie parametrów dla rozkÅ‚adu eksponencjalnego nie jest wspierane" # MASS/R/fitdistr.R: 115 # stop(gettext("supplying pars for the geometric distribution is not supported"), domain = "R-MASS") #~ msgid "supplying pars for the geometric distribution is not supported" #~ msgstr "" #~ "dostarczanie parametrów dla rozkÅ‚adu geometrycznego nie jest wspierane" # MASS/R/isoMDS.R: 36 # stop("invalid size") # MASS/R/sammon.R: 38 # stop("invalid size") #~ 
msgid "invalid '%s'" #~ msgstr "niepoprawne '%s'" # MASS/R/lda.R: 90 # stop("invalid 'prior'") # MASS/R/lda.R: 248 # stop("invalid 'prior'") # MASS/R/qda.R: 94 # stop("invalid 'prior'") # MASS/R/qda.R: 191 # stop("invalid 'prior'") #~ msgid "invalid 'prior' argument" #~ msgstr "niepoprawna argument 'prior'" #~ msgid "'%s' argument is not an object of class %s" #~ msgstr "argument '%s' nie jest obiektem klasy %s" # MASS/R/lda.R: 294 # gettext("Call:", domain = "R-MASS") # MASS/R/polr.R: 114 # gettext("Call:", domain = "R-MASS") # MASS/R/polr.R: 186 # gettext("Call:\n", domain = "R-MASS") # MASS/R/qda.R: 300 # gettext("Call:", domain = "R-MASS") # MASS/R/rlm.R: 210 # gettext("Call:\n", domain = "R-MASS") # MASS/R/mca.R: 50 # gettext("Call:\n", domain = "R-MASS") # MASS/R/lqs.R: 172 # gettext("Call:\n", domain = "R-MASS") # MASS/R/loglm.R: 215 # gettext("Call:\n", domain = "R-MASS") #~ msgid "Call:" #~ msgstr "WywoÅ‚anie:" # MASS/R/lda.R: 297 # gettext("Prior probabilities of groups:", domain = "R-MASS") # MASS/R/qda.R: 303 # gettext("Prior probabilities of groups:", domain = "R-MASS") #~ msgid "Prior probabilities of groups:" #~ msgstr "Pierwotne prawdopodobieÅ„stwa grup:" # MASS/R/lda.R: 299 # gettext("Group means:", domain = "R-MASS") # MASS/R/qda.R: 305 # gettext("Group means:", domain = "R-MASS") #~ msgid "Group means:" #~ msgstr "Åšrednie grup:" # MASS/R/lda.R: 301 # gettext("Coefficients of linear discriminants:", domain = "R-MASS") #~ msgid "Coefficients of linear discriminants:" #~ msgstr "Współczynniki liniowych dyskryminantów:" # MASS/R/lda.R: 306 # gettext("Proportion of trace:", domain = "R-MASS") #~ msgid "Proportion of trace:" #~ msgstr "Proporcje Å›ladu:" # MASS/R/lm.ridge.R: 74 # gettextf("modified HKB estimator is %s", format(obj$kHKB), domain = "R-MASS") #~ msgid "modified HKB estimator is %s" #~ msgstr "zmodyfikowany estymator HKB to %s" # MASS/R/lm.ridge.R: 75 # gettextf("modified L-W estimator is %s", format(obj$kLW), domain = "R-MASS") #~ msgid 
"modified L-W estimator is %s" #~ msgstr "zmodyfikowany estymator L-W to %s" # MASS/R/lm.ridge.R: 79 # gettextf("smallest value of GCV at %s", format(obj$lambda[k]), domain = "R-MASS") #~ msgid "smallest value of GCV at %s" #~ msgstr "najmniejsza wartość GCV w %s" #~ msgid "LR tests for hierarchical log-linear models" #~ msgstr "testy LR dla hierarchicznych modeli logarytmiczno-liniowych" # MASS/R/loglm.R: 207 # gettextf("Model %d:\n", i, domain = "R-MASS") #~ msgid "Model %d:" #~ msgstr "Model %d:" #~ msgid "Statistics:" #~ msgstr "Statystyka:" # MASS/R/loglm.R: 240 # gettext("Re-fitting to find fitted values\n", domain = "R-MASS") #~ msgid "Re-fitting to find fitted values" #~ msgstr "Ponowne dopasowywanie aby znaleźć wartoÅ›ci dopasowania" # MASS/R/loglm.R: 253 # gettext("Formula:\n", domain = "R-MASS") #~ msgid "Formula:" #~ msgstr "FormuÅ‚a:" #~ msgid "Observed (Expected):" #~ msgstr "Zaobserwowane (Oczekiwane):" # MASS/R/loglm.R: 291 # gettext("Re-fitting to get fitted values\n", domain = "R-MASS") #~ msgid "Re-fitting to get fitted values" #~ msgstr "Ponowne dopasowywanie aby uzyskać wartoÅ›ci dopasowania" # MASS/R/loglm.R: 240 # gettext("Re-fitting to find fitted values\n", domain = "R-MASS") #~ msgid "Re-fitting to get frequencies and fitted values" #~ msgstr "" #~ "Ponowne dopasowywanie aby uzyskać czÄ™stotliwoÅ›ci oraz wartoÅ›ci dopasowania" # MASS/R/loglm.R: 321 # gettext("Re-fitting to calculate missing coefficients\n", domain = "R-MASS") #~ msgid "Re-fitting to calculate missing coefficients" #~ msgstr "Ponowne dopasowywanie aby wyznaczyć brakujÄ…ce współczynniki" # MASS/R/polr.R: 118 # gettext("Coefficients:", domain = "R-MASS") # MASS/R/polr.R: 192 # gettext("Coefficients:", domain = "R-MASS") # MASS/R/lqs.R: 176 # gettext("Coefficients:\n", domain = "R-MASS") #~ msgid "Coefficients:" #~ msgstr "Współczynniki:" #~ msgid "Scale estimates %s" #~ msgstr "Oszacowania skali %s" #~ msgid "Multiple correspondence analysis of %d cases of %d factors" #~ 
msgstr "Wielokrotna analiza odpowiednioÅ›ci %d przypadków %d czynników" #~ msgid "Correlations %s cumulative %% explained %s" #~ msgstr "Korelacje %s skumulowany %% wyjaÅ›niony %s" #~ msgid "Likelihood ratio tests of Negative Binomial Models" #~ msgstr "testy stosunku funkcji wiarygodnoÅ›ci dla modeli ujemnych dwumianów" # MASS/R/negbin.R: 291 # gettext("Warning while fitting theta: ", domain = "R-MASS") #~ msgid "Warning while fitting theta:" #~ msgstr "Ostrzeżenie podczas dopasowania 'theta':" #~ msgid "2 x log-likelihood: %s" #~ msgstr "2 x logarytm funkcji wiarygodnoÅ›ci: %s" # MASS/R/polr.R: 121 # gettext("No coefficients", domain = "R-MASS") # MASS/R/polr.R: 196 # gettext("No coefficients", domain = "R-MASS") #~ msgid "No coefficients" #~ msgstr "Brak współczynników" # MASS/R/polr.R: 123 # gettext("Intercepts:", domain = "R-MASS") # MASS/R/polr.R: 198 # gettext("Intercepts:", domain = "R-MASS") #~ msgid "Intercepts:" #~ msgstr "PrzeciÄ™cia:" #~ msgid "Residual Deviance: %s" #~ msgstr "Odchylenie reszt: %s" # MASS/R/polr.R: 126 # gettextf("AIC: %s", format(x$deviance + 2*x$edf, nsmall=2L), domain = "R-MASS") # MASS/R/polr.R: 202 # gettextf("AIC: %s", format(x$deviance + 2*x$edf, nsmall=2L), domain = "R-MASS") #~ msgid "AIC: %s" #~ msgstr "AIC: %s" # MASS/R/polr.R: 129 # gettext("Warning: did not converge as iteration limit reached", domain = "R-MASS") #~ msgid "Warning: did not converge as iteration limit reached" #~ msgstr "" #~ "Ostrzeżenie: nie uzyskano zbieżnoÅ›ci przed osiÄ…gniÄ™ciem granicy iteracji" #~ msgid "Correlation of Coefficients:" #~ msgstr "Korelacja współczynników:" #~ msgid "Likelihood ratio tests of ordinal regression models" #~ msgstr "" #~ "testy stosunku funkcji wiarygodnoÅ›ci dla modeli regresji porzÄ…dkowej" #~ msgid "Parameter: %s down" #~ msgstr "Parametr: %s w dół" # MASS/R/rlm.R: 214 # gettextf("Converged in %d iterations\n", length(x$conv), domain = "R-MASS") #~ msgid "Converged in %d iterations" #~ msgstr "Uzbieżnione w %d 
iteracjach" # MASS/R/rlm.R: 215 # gettextf("Ran %d iterations without convergence\n", length(x$conv), domain = "R-MASS") #~ msgid "Ran %d iterations without convergence" #~ msgstr "Wykonano %d iteracji bez uzyskania zbieżnoÅ›ci" #~ msgid "Degrees of freedom: %d total; %d residual" #~ msgstr "Stopnie swobody: wszystkich %d; reszt %d" # MASS/R/rlm.R: 223 # gettextf("Scale estimate: %s", format(signif(x$s,3)), domain = "R-MASS") #~ msgid "Scale estimate: %s" #~ msgstr "Oszacowanie skali: %s" # MASS/R/rlm.R: 306 # gettext("Weighted Residuals:\n", domain = "R-MASS") #~ msgid "Weighted Residuals:" #~ msgstr "Ważone reszty:" # MASS/R/rlm.R: 307 # gettext("Residuals:\n", domain = "R-MASS") #~ msgid "Residuals:" #~ msgstr "Reszty:" #~ msgid "Coefficients: (%d not defined because of singularities)" #~ msgstr "Współczynniki: (%d niezdefiniowane z uwagi na osobliwoÅ›ci)" #~ msgid "Residual standard error: %s on %d degrees of freedom" #~ msgstr "Błąd standardowy reszt: %s dla %d stopni swobody" # MASS/R/rms.curv.R: 60 # gettextf("Parameter effects: c^theta x sqrt(F) = %s", round(x$pe, 4), domain = "R-MASS") #~ msgid "Parameter effects: c^theta x sqrt(F) = %s" #~ msgstr "Efekty parametru: c^theta x sqrt(F) = %s" # MASS/R/rms.curv.R: 61 # gettextf("Intrinsic: c^iota x sqrt(F) = %s", round(x$ic, 4), domain = "R-MASS") #~ msgid "Intrinsic: c^iota x sqrt(F) = %s" #~ msgstr "Faktyczny: c^iota x sqrt(F) = %s" # MASS/R/stepAIC.R: 52 # gettext("Stepwise Model Path \nAnalysis of Deviance Table", domain = "R-MASS") #~ msgid "" #~ "Stepwise Model Path \n" #~ "Analysis of Deviance Table" #~ msgstr "" #~ "Stopniowa Å›cieżka modelu\n" #~ "Analiza tablicy odchyleÅ„" # MASS/R/stepAIC.R: 53 # gettext("Initial Model:", domain = "R-MASS") #~ msgid "Initial Model:" #~ msgstr "PoczÄ…tkowy model:" # MASS/R/stepAIC.R: 54 # gettext("Final Model:", domain = "R-MASS") #~ msgid "Final Model:" #~ msgstr "KoÅ„cowy model:" # MASS/R/stepAIC.R: 111 # gettextf("Start: AIC=%s", format(round(bAIC, 2)), domain = 
"R-MASS") #~ msgid "Start: AIC=%s" #~ msgstr "Start: AIC=%s" #~ msgid "Step: AIC=%s" #~ msgstr "Krok: AIC=%s" # MASS/R/lqs.R: 133 # sprintf(ngettext(z$sing, "%d singular sample of size %d out of %d", "%d singular samples of size %d out of %d", domain = "R-MASS"), z$sing, ps, nsamp) # MASS/R/lqs.R: 245 # sprintf(ngettext(z$sing, "%d singular sample of size %d out of %d", "%d singular samples of size %d out of %d", domain = "R-MASS"), z$sing, ps, nsamp) #~ msgid "%d singular sample of size %d out of %d" #~ msgid_plural "%d singular samples of size %d out of %d" #~ msgstr[0] "%d osobliwa próbka o rozmiarze %d z %d" #~ msgstr[1] "%d osobliwe próbki o rozmiarze %d z %d" #~ msgstr[2] "%d osobliwych próbek o rozmiarze %d z %d" #~ msgid "log Likelihood" #~ msgstr "logarytm funkcji wiarygodnoÅ›ci" #, fuzzy #~ msgid "Scale estimate:" #~ msgstr "PoczÄ…tkowe oszacowanie: %s" MASS/po/R-ko.po0000644000176000001440000004047412121561422012640 0ustar ripleyusers# Korean translation for R MASS package # Recommended/MASS/po/R-ko.po # Maintainer: Brian Ripley # Copyright (C) 1995-2013 The R Core Team # This file is distributed under the same license as the R MASS package. # R Development Translation Team - Korean # Chel Hee Lee , 2013. # Chel Hee Lee , 2013. # msgid "" msgstr "" "Project-Id-Version: MASS 7.3-22\n" "POT-Creation-Date: 2013-03-18 09:49\n" "PO-Revision-Date: 2013-03-11 13:47-0600\n" "Last-Translator: Chel Hee Lee \n" "Language-Team: R Development Translation Teams (Korean) \n" "Language: ko\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=1; plural=0;\n" "X-Poedit-Language: Korean\n" "X-Poedit-Country: KOREA, REPUBLIC OF\n" "X-Poedit-SourceCharset: utf-8\n" msgid "no terms in scope" msgstr "scopeì— ì•„ë¬´ëŸ° í•­ì´ ì—†ìŠµë‹ˆë‹¤" msgid "no terms in scope for adding to object" msgstr "" msgid "trying + %s" msgstr "" msgid "number of rows in use has changed: remove missing values?" 
msgstr "ì‚¬ìš©ì¤‘ì¸ í–‰ë“¤ì´ ë³€ê²½ë˜ì—ˆìŠµë‹ˆë‹¤: ê²°ì¸¡ì¹˜ë“¤ì„ ì‚­ì œí• ê¹Œìš”?" msgid "F test assumes 'quasi%s' family" msgstr "F 테스트는 'quasi%s' 페밀리ë¼ëŠ” ê°€ì •í•˜ì— ìˆ˜í–‰ë©ë‹ˆë‹¤" msgid "no 'addterm' method implemented for \"mlm\" models" msgstr "\"mlm\"모ë¸ë“¤ì„ 위하여 êµ¬í˜„ëœ 'addterm'메소드가 아닙니다" msgid "scope is not a subset of term labels" msgstr "" msgid "trying - %s" msgstr "" msgid "'dropterm' not implemented for \"mlm\" fits" msgstr "" msgid "iteration limit reached near 'x = %f'" msgstr "" msgid "%s does not have both 'qr' and 'y' components" msgstr "" msgid "response variable must be positive" msgstr "종ì†ë³€ìˆ˜ëŠ” 반드시 양수ì´ì–´ì•¼ 합니다" msgid "Waiting for profiling to be done..." msgstr "프로파ì¼ë§ì´ 완료ë˜ê¸¸ 기다리는 중입니다..." msgid "invalid number of levels" msgstr "유효하지 ì•Šì€ levelì˜ ìˆ˜ìž…ë‹ˆë‹¤" msgid "frequency table is %d-dimensional" msgstr "%d-ì°¨ì› ë¶„í• í‘œìž…ë‹ˆë‹¤" msgid "invalid table specification" msgstr "유효하지 ì•Šì€ í…Œì´ë¸” 지정입니다" msgid "higher-way table requested. Only 2-way allowed" msgstr "" msgid "negative or non-integer entries in table" msgstr "" msgid "all frequencies are zero" msgstr "모든 빈ë„ìˆ˜ë“¤ì´ 0입니다" msgid "empty row or column in table" msgstr "í…Œì´ë¸”ì— í–‰ ë˜ëŠ” ì—´ì´ ë¹„ì–´ 있습니다" msgid "biplot is only possible if nf >= 2" msgstr "" msgid "missing or infinite values in 'x'" msgstr "'x'ì— ëˆ„ë½ëœ ê°’ ë˜ëŠ” 무한한 ê°’ë“¤ì´ ìžˆìŠµë‹ˆë‹¤" msgid "length of 'wt' must equal number of observations" msgstr "'wt'ì˜ ê¸¸ì´ëŠ” 반드시 ê´€ì¸¡ì¹˜ì˜ ê°œìˆ˜ì™€ 같아야 합니다" msgid "negative weights not allowed" msgstr "ìŒì˜ ê°’ì„ ê°€ì§€ëŠ” 가중치는 허용ë˜ì§€ 않습니다" msgid "no positive weights" msgstr "" msgid "'center' is not the right length" msgstr "'center'ì˜ ê¸¸ì´ê°€ 올바르지 않습니다" msgid "Probable convergence failure" msgstr "" msgid "'uin' is too large to fit plot in" msgstr "" msgid "'x' must be a non-empty numeric vector" msgstr "'x'는 반드시 비어있지 ì•Šì€ ìˆ˜ì¹˜í˜• 벡터ì´ì–´ì•¼ 합니다" msgid "'x' contains missing or infinite values" msgstr "'x'는 결측값 ë˜ëŠ” 무한한 ê°’ë“¤ì„ í¬í•¨í•˜ê³ 
 있습니다" msgid "'densfun' must be supplied as a function or name" msgstr "'densfun'ì€ ë°˜ë“œì‹œ 함수 ë˜ëŠ” ì´ë¦„으로서 주어져야 합니다" msgid "unsupported distribution" msgstr "ì§€ì›ë˜ì§€ 않는 ë¶„í¬ìž…니다" msgid "supplying pars for the %s distribution is not supported" msgstr "" msgid "need positive values to fit a log-Normal" msgstr "log-Normalì„ ì í•©í•˜ê¸° 위해서는 ì–‘ì˜ ê°’ë“¤ì„ í•„ìš”ë¡œ 합니다" msgid "Exponential values must be >= 0" msgstr "Exponenital values는 반드시 0보다 í¬ê±°ë‚˜ 같아야 합니다" msgid "Weibull values must be > 0" msgstr "Weibull values는 반드시 0 보다 커야 합니다" msgid "gamma values must be >= 0" msgstr "gamma valuesë“¤ì€ ë°˜ë“œì‹œ 0 보다 í¬ê±°ë‚˜ 같아야 합니다" msgid "'start' must be a named list" msgstr "'start'는 반드시 named listì´ì–´ì•¼ 합니다" msgid "'start' specifies names which are not arguments to 'densfun'" msgstr "" msgid "optimization failed" msgstr "최ì í™”ì— ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤" msgid "only 'REML = FALSE' is implemented" msgstr "오로지 'REML = FALSE'ë§Œì´ êµ¬í˜„ë˜ì–´ 있습니다" msgid "Initial estimate: %s" msgstr "초기 추정치는 다ìŒê³¼ 같습니다: %s" msgid "Iter. 
%d Alpha: %s" msgstr "" msgid "iteration limit reached" msgstr "iteration ì œí•œì— ë„달했습니다" msgid "package 'nlme' is essential" msgstr "패키지 'nlme'는 필수ì ìž…니다" msgid "'family' not recognized" msgstr "ì¸ì‹í•  수 없는 'family'입니다" msgid "iteration %d" msgstr "" msgid "'anova' is not available for PQL fits" msgstr "PQL ì í•©ì— 'anova'는 사용가능하지 않습니다" msgid "cannot estimate scale: MAD is zero for this sample" msgstr "" msgid "an initial configuration must be supplied with NA/Infs in 'd'" msgstr "" msgid "'y' must be a matrix" msgstr "'y'는 반드시 행렬ì´ì–´ì•¼ 합니다" msgid "distances must be result of 'dist' or a square matrix" msgstr "distances는 반드시 'dist'ì˜ ê²°ê³¼ì´ê±°ë‚˜ 정방행렬ì´ì–´ì•¼ 합니다" msgid "invalid size" msgstr "유효하지 ì•Šì€ í¬ê¸°ìž…니다" msgid "zero or negative distance between objects %d and %d" msgstr "%d와 %d ê°ì²´ë“¤ 사ì´ì˜ 거리가 0 ë˜ëŠ” ìŒìˆ˜ìž…니다" msgid "not enough non-missing data" msgstr "" msgid "invalid initial configuration" msgstr "유효하지 ì•Šì€ ì´ˆê¸° 설정입니다" msgid "initial configuration must be complete" msgstr "" msgid "invalid row(x)" msgstr "유효하지 ì•Šì€ row(x)입니다" msgid "invalid length(d)" msgstr "유효하지 ì•Šì€ length(d)입니다" msgid "data vectors must be the same length" msgstr "ë°ì´í„° ë²¡í„°ë“¤ì€ ë°˜ë“œì‹œ ê°™ì€ ê¸¸ì´ì—¬ì•¼ 합니다" msgid "missing or infinite values in the data are not allowed" msgstr "ë°ì´í„°ì— 결측값 ë˜ëŠ” 무한한 ê°’ë“¤ì€ í—ˆìš©ë˜ì§€ 않습니다" msgid "only finite values are allowed in 'lims'" msgstr "'lims'ì—는 오로지 유한한 값들만 허용ë©ë‹ˆë‹¤" #, fuzzy msgid "bandwidths must be strictly positive" msgstr "'h'는 반드시 양수ì´ì–´ì•¼ë§Œ 합니다" msgid "'x' is not a matrix" msgstr "'x'는 í–‰ë ¬ì´ ì•„ë‹™ë‹ˆë‹¤" msgid "infinite, NA or NaN values in 'x'" msgstr "'x'ì— ë¬´í•œí•œ ê°’, NA ë˜ëŠ” NaNì´ ìžˆìŠµë‹ˆë‹¤" msgid "nrow(x) and length(grouping) are different" msgstr "nrow(x)와 length(grouping)ê°€ 서로 다릅니다" msgid "invalid 'prior'" msgstr "유효하지 ì•Šì€ 'prior'입니다" msgid "'prior' is of incorrect length" msgstr "'prior'ì˜ ê¸¸ì´ê°€ 잘못ë˜ì—ˆìŠµë‹ˆë‹¤" msgid "cannot use leave-one-out CV with method %s" 
msgstr "" msgid "rank = 0: variables are numerically constant" msgstr "" msgid "variables are collinear" msgstr "ë³€ìˆ˜ë“¤ì´ ê³µì„ í˜•ê´€ê³„ì— ìžˆìŠµë‹ˆë‹¤" msgid "'nu' must exceed 2" msgstr "'nu'는 반드시 2를 넘어야 합니다" msgid "group means are numerically identical" msgstr "그룹 í‰ê· ë“¤ì´ 수치ì ìœ¼ë¡œ ë™ì¼í•©ë‹ˆë‹¤" msgid "object not of class \"lda\"" msgstr "í´ëž˜ìФ \"lda\"ê°€ 아닌 ê°ì²´ìž…니다" msgid "wrong number of variables" msgstr "ë³€ìˆ˜ë“¤ì˜ ê°œìˆ˜ê°€ 잘못ë˜ì—ˆìŠµë‹ˆë‹¤" msgid "variable names in 'newdata' do not match those in 'object'" msgstr "'newdata'ë‚´ì˜ ë³€ìˆ˜ëª…ë“¤ì´ 'object'ë‚´ì˜ ë³€ìˆ˜ëª…ë“¤ê³¼ ì¼ì¹˜í•˜ì§€ 않습니다" msgid "'breaks' must be strictly increasing" msgstr "'breaks'는 반드시 ì¦ê°€ë§Œì„ 해야 합니다" msgid "'breaks' do not cover the data" msgstr "" msgid "dim(W) is not correct" msgstr "dim(W)ì´ ì •í™•í•˜ì§€ 않습니다" msgid "'W' is not positive definite" msgstr "'W'는 positive definiteê°€ 아닙니다" msgid "'data' has no 'terms' attribute" msgstr "'data'는 'terms'ë¼ëŠ” ì†ì„±ì´ 없습니다" msgid "formula specifies no response" msgstr "" msgid "'object' has no 'call' component. Updating not possible" msgstr "" "'object'ê°€ 'call' 요소를 가지고 있지 않습니다. 
가능하지 ì•Šì€ ì—…ë°ì´íŠ¸ìž…ë‹ˆë‹¤" msgid "Response variable must be positive after additions" msgstr "" msgid "missing values are not allowed" msgstr "ê²°ì¸¡ê°’ë“¤ì€ í—ˆìš©ë˜ì§€ 않습니다" msgid "'x' and 'y' must have the same number of rows" msgstr "'x'와 'y'는 반드시 í–‰ì˜ ê°œìˆ˜ì™€ 같아야 합니다" msgid "'quantile' must be at most %d" msgstr "" msgid "'ps' must be at least 'p'" msgstr "" msgid "'lqs' failed: all the samples were singular" msgstr "" msgid "missing or infinite values are not allowed" msgstr "결측값 ë˜ëŠ” 무한한 ê°’ë“¤ì€ í—ˆìš©ë˜ì§€ 않습니다" msgid "at least %d cases are needed" msgstr "" msgid "'quantile' must be at least %d" msgstr "" msgid "at least one column has IQR 0" msgstr "ì ì–´ë„ í•˜ë‚˜ì˜ ì—´ì´ IQR 0ì„ ê°€ì§‘ë‹ˆë‹¤" msgid "'x' is probably collinear" msgstr "" msgid "all variables must be factors" msgstr "모든 ë³€ìˆ˜ë“¤ì´ ìš”ì¸í˜•ì´ì–´ì•¼ 합니다" msgid "factors in 'newdata' do not match those for 'object'" msgstr "'newdata'ì— ìžˆëŠ” ìš”ì¸ë“¤ì´ 'object'ì— ìžˆëŠ” ìš”ì¸ë“¤ê³¼ ì¼ì¹˜í•˜ì§€ 않습니다" msgid "'newdata' is not of the right length" msgstr "'newdata'ì˜ ê¸¸ì´ê°€ 올바르지 않습니다" msgid "'X' must be a numeric or complex matrix" msgstr "'X'는 반드시 수치형 ë˜ëŠ” 복소수형 행렬ì´ì–´ì•¼ 합니다" msgid "incompatible arguments" msgstr "" msgid "'Sigma' is not positive definite" msgstr "'Sigma'는 positive definiteê°€ 아닙니다" msgid "'theta' must be given" msgstr "'theta'는 반드시 주어져야 합니다" msgid "negative values not allowed for the negative binomial family" msgstr "negative binomial 페밀리ì—서 허용ë˜ì§€ 않는 ìŒì˜ 값들입니다" msgid "tests made without re-estimating 'theta'" msgstr "'theta'를 재추정하지 ì•Šì€ ìƒíƒœì—서 만들어진 테스트들입니다" msgid "only Chi-squared LR tests are implemented" msgstr "오로지 Chi-squared LR í…ŒìŠ¤íŠ¸ë“¤ë§Œì´ êµ¬í˜„ë˜ì–´ 있습니다" msgid "not all objects are of class \"negbin\"" msgstr "모든 ê°ì²´ë“¤ì´ í´ëž˜ìФ \"negbin\"는 아닙니다" msgid "unimplemented method: %s" msgstr "구현ë˜ì§€ ì•Šì€ ë©”ì†Œë“œìž…ë‹ˆë‹¤: %s" msgid "Initial fit:" msgstr "" msgid "Initial value for 'theta': %f" msgstr "다ìŒì€ 'theta'ì— ëŒ€í•œ 초기값입니다: %f" msgid 
"alternation limit reached" msgstr "" msgid "'theta' must be specified" msgstr "'theta'는 반드시 지정ë˜ì–´ì•¼ 합니다" msgid "" "\"%s\" link not available for negative binomial family; available links are " "\"identity\", \"log\" and \"sqrt\"" msgstr "" msgid "estimate truncated at zero" msgstr "" msgid "theta.ml: iter" msgstr "theta.ml: iter" msgid "theta =" msgstr "theta =" msgid "extra arguments discarded" msgstr "" msgid "at least 3 distinct 'x' values are needed" msgstr "ì ì–´ë„ 3ê°œì˜ ë‹¤ë¥¸ 'x'ê°’ë“¤ì´ í•„ìš”í•©ë‹ˆë‹¤" msgid "an intercept is needed and assumed" msgstr "" msgid "response must be a factor" msgstr "response는 반드시 ìš”ì¸ì´ì–´ì•¼ 합니다" msgid "response must have 3 or more levels" msgstr "response는 반드시 세개 ì´ìƒì˜ ë ˆë²¨ë“¤ì„ ê°€ì§€ê³  있어야 합니다" msgid "attempt to find suitable starting values failed" msgstr "ì ì ˆí•œ 시작ì ë“¤ì„ 찾지 못했습니다" msgid "design appears to be rank-deficient, so dropping some coefs" msgstr "" msgid "'start' is not of the correct length" msgstr "'start'ì˜ ê¸¸ì´ê°€ 올바르지 않습니다" msgid "Re-fitting to get Hessian" msgstr "" msgid "not a \"polr\" object" msgstr "\"polr\" ê°ì²´ê°€ 아닙니다" msgid "anova is not implemented for a single \"polr\" object" msgstr "anova는 ë‹¨ì¼ \"polr\" ê°ì²´ì— 구현ë˜ì–´ 있지 않습니다" msgid "not all objects are of class \"polr\"" msgstr "모든 ê°ì²´ë“¤ì´ í´ëž˜ìФ \"polr\"는 아닙니다" msgid "models were not all fitted to the same size of dataset" msgstr "" msgid "Parameter:" msgstr "파ë¼ë¯¸í„°:" msgid "down" msgstr "" msgid "up" msgstr "" msgid "" "profiling has found a better solution, so original fit had not converged" msgstr "" "프로파ì¼ë§ì´ ë” ë‚˜ì€ í•´ë¥¼ 찾았으므로 ì´ˆê¸°ì˜ ì í•©ì€ 수렴하지 않았ìŒì„ ì˜ë¯¸í•©ë‹ˆ" "다" msgid "weighted fits are not supported" msgstr "weighted fits는 ì§€ì›ë˜ì§€ 않습니다" msgid "some group is too small for 'qda'" msgstr "ì¼ë¶€ ê·¸ë£¹ì€ 'qda'ì— ë„ˆë¬´ 작습니다" msgid "rank deficiency in group %s" msgstr "" msgid "object not of class \"qda\"" msgstr "í´ëž˜ìФ \"qda\"ê°€ 아닌 ê°ì²´ìž…니다" msgid "cannot have leave-one-out CV with 'newdata'" 
msgstr "" msgid "'x' is singular: singular fits are not implemented in 'rlm'" msgstr "" msgid "invalid 'test.vec'" msgstr "유효하지 ì•Šì€ 'test.vec'입니다" msgid "length of 'weights' must equal number of observations" msgstr "'weights'ì˜ ê¸¸ì´ëŠ” 반드시 ê´€ì¸¡ì¹˜ì˜ ê°œìˆ˜ì™€ 같아야 합니다" msgid "negative 'weights' value" msgstr "" msgid "some of ... do not match" msgstr "...ì˜ ì¼ë¶€ê°€ ì¼ì¹˜í•˜ì§€ 않습니다" msgid "'init' method is unknown" msgstr "'init' 메소드를 알 수 없습니다" msgid "'c' must be at least 1.548 and has been ignored" msgstr "" msgid "'method' is unknown" msgstr "'method'를 알 수 없습니다" msgid "'rlm' failed to converge in %d steps" msgstr "" msgid "'coef' must define a contrast, i.e., sum to 0" msgstr "'coef'는 반드시 í•©ì´ 0ì´ë˜ë„ë¡ contrast를 ì •ì˜í•´ì•¼ 합니다" msgid "'coef' must have same length as 'contrast.obj'" msgstr "'coef'는 반드시 'contrast.obj'와 ê°™ì€ ê¸¸ì´ë¥¼ 가져야 합니다" msgid "each element of '%s' must be logical" msgstr "'%s'ì˜ ê° ìš”ì†ŒëŠ” 반드시 논리ì ì´ì–´ì•¼ 합니다" msgid "the contrast defined is empty (has no TRUE elements)" msgstr "" msgid "columns of 'contrast.obj' must define a contrast (sum to zero)" msgstr "'contrast.obj'ì˜ ì—´ë“¤ì€ í•©ì´ 0ì´ë˜ë„ë¡ contrast를 ì •ì˜í•´ì•¼ 합니다" msgid "\"gradient\" attribute missing" msgstr "\"gradient\" ì†ì„±ì´ 빠져있습니다" msgid "\"hessian\" attribute missing" msgstr "\"hessian\" ì†ì„±ì´ 빠져있습니다" msgid "regression apparently linear" msgstr "" msgid "Infs not allowed in 'd'" msgstr "'d'ì— í—ˆìš©ë˜ì§€ 않는 Inf입니다" msgid "an initial configuration must be supplied if there are NAs in 'd'" msgstr "'d'ì— NAê°€ 있다면 ì´ˆê¸°ì„¤ì •ì´ ë°˜ë“œì‹œ 제공ë˜ì–´ì•¼ 합니다" msgid "'use.start' cannot be used with R's version of 'glm'" msgstr "'use.start'는 Rì˜ 'glm'버전과 함께 ì“°ì¼ ìˆ˜ 없습니다" msgid "AIC is not defined for this model, so 'stepAIC' cannot proceed" msgstr "ì´ ëª¨ë¸ì— 대해서 AIC는 ì •ì˜ë˜ì–´ 있지 않아 'stepAIC'를 구할 수 없습니다" msgid "AIC is -infinity for this model, so 'stepAIC' cannot proceed" msgstr "" "ì´ ëª¨ë¸ì— 대한 AICê°€ ìŒì˜ ë¬´í•œê°’ì„ ê°€ì§€ë¯€ë¡œ 'stepAIC'를 구할 수 없습니다" msgid "0 df 
terms are changing AIC" msgstr "" msgid "AIC undefined for REML fit" msgstr "REML ì í•©ì— ì •ì˜ë˜ì§€ ì•Šì€ AIC입니다" msgid "'nbins' must result in a positive integer" msgstr "'nbins'는 반드시 ê²°ê³¼ì ìœ¼ë¡œ ì–‘ì˜ ì •ìˆ˜ë¥¼ 주어야 합니다" msgid "'h' must be strictly positive" msgstr "'h'는 반드시 양수ì´ì–´ì•¼ë§Œ 합니다" msgid "uneven breaks with 'prob = FALSE' will give a misleading plot" msgstr "" msgid "'x' has length zero" msgstr "'x'ì˜ ê¸¸ì´ê°€ 0입니다" msgid "no solution in the specified range of bandwidths" msgstr "주어진 bandwidthsì˜ ë²”ìœ„ë‚´ì— ì†”ë£¨ì…˜ì´ ì—†ìŠµë‹ˆë‹¤" msgid "minimum occurred at one end of the range" msgstr "rangeì˜ ëì—서 ìµœì†Œê°’ì„ ì°¾ì•˜ìŠµë‹ˆë‹¤" msgid "using the %d/%d row from a combined fit" msgid_plural "using the %d/%d rows from a combined fit" msgstr[0] "" msgid "group %s is empty" msgid_plural "groups %s are empty" msgstr[0] "" msgid "variable %s appears to be constant within groups" msgid_plural "variables %s appear to be constant within groups" msgstr[0] "" msgid "only %d set, so all sets will be tried" msgid_plural "only %d sets, so all sets will be tried" msgstr[0] "" msgid "%d missing observation deleted" msgid_plural "%d missing observations deleted" msgstr[0] "" msgid "%d row with zero weights not counted" msgid_plural "%d rows with zero weights not counted" msgstr[0] "가중치를 가지지 않는 %dí–‰ì€ ì„¸ì–´ì§€ì§€ 않습니다" MASS/inst/0000755000176000001440000000000012620323725012024 5ustar ripleyusersMASS/inst/po/0000755000176000001440000000000012121561422012434 5ustar ripleyusersMASS/inst/po/pl/0000755000176000001440000000000011772553043013062 5ustar ripleyusersMASS/inst/po/pl/LC_MESSAGES/0000755000176000001440000000000011772553043014647 5ustar ripleyusersMASS/inst/po/pl/LC_MESSAGES/R-MASS.mo0000644000176000001440000004466112315222153016146 0ustar ripleyusersÞ•­„éì f‘ø>1Jp-» é ''%Ou$”/¹ é- .8g0‡)¸âú+1])q$›À8Ó +Ed$‚$§Ì<é&<!V4x-­'Û+;E&¨½<Ù>Up°È Ýê &C2]#´ÏèA =b" 3Ã÷)>/\$Œ"±2Ô+*3>^$;Âþ5J$Ot‘¬4Æû!;%U%{/¡Ñ!è& 1ATr„ ¬¹ Õâ%ú5 0V(‡*°!Û6ý46S(г(Ì<õ2 0O € 0” Å &× þ 
%!#8!\!*x!9£!Ý!÷!N""`")ƒ"(­"Ö"ê"H#,O#|#˜#µ##Ï#"ó#$$!;$]$7v$(®$4×$ %% #% /%=;%y%’%«%P®%aÿ%:a&œ&´&Ô&3î&"'{6)²)Î)‡é)˜q*0 +-;+)i+9“+7Í+$,-*,?X,2˜,9Ë,9-#?-?c-9£-(Ý-'...9L.†.8.3Ö.! /P,/0}/®/#Í/!ñ/'01;0!m0N0!Þ0%14&1<[13˜1;Ì1'2023P2H„23Í2 3'"3^J3Q©3.û3**4)U4)4©4Å4%Þ45 5($5)M5Bw5(º5"ã5(6(/6X6Oq6LÁ6/7Q>7+71¼7)î7G80`8%‘8;·8.ó80"9JS9/ž9NÎ9:G8:€:-…:"³:'Ö:&þ:;%;(a;)Š;"´;9×;%<N7<†<)š<,Ä<ñ<=$.=S=k=ˆ==±= Ð=)Ü=/>:6>5q>=§>8å>,?IK?&•?N¼?L @X@'s@@›@Ü@8ú@3A,FAsA/A½A'×A&ÿA/&B-VBG„BÌBèBµC*ºC@åC1&D XDyDb•D%øDE#7E%[E3E#µE,ÙE(F/F<MF0ŠF@»FüFGG(G89GrG‘G§G›ªGFH@ÔHI%0IVI=sI9‘¨xhªO;lƒT„ac_H0 (R’ž)«N‚%{#"s UfoЉ€”¬p¥W­ŸMYˆ!¤2yn|r‹kZ†6©X4=›-—<u>3q¦^]*™Œ@D}b `LIŽœ,Q~‡KS“•v1t GdeA/§7mšJ –…j¢'Cz£EwF˜5.Pig :?\8&B$[¡ +V"%s" link not available for negative binomial family; available links are "identity", "log" and "sqrt""gradient" attribute missing"hessian" attribute missing%d missing observation deleted%d missing observations deleted%d row with zero weights not counted%d rows with zero weights not counted%s does not have both 'qr' and 'y' components'Sigma' is not positive definite'W' is not positive definite'X' must be a numeric or complex matrix'anova' is not available for PQL fits'breaks' do not cover the data'breaks' must be strictly increasing'c' must be at least 1.548 and has been ignored'center' is not the right length'coef' must define a contrast, i.e., sum to 0'coef' must have same length as 'contrast.obj''data' has no 'terms' attribute'densfun' must be supplied as a function or name'dropterm' not implemented for "mlm" fits'family' not recognized'h' must be strictly positive'init' method is unknown'lqs' failed: all the samples were singular'method' is unknown'nbins' must result in a positive integer'newdata' is not of the right length'nu' must exceed 2'object' has no 'call' component. 
Updating not possible'prior' is of incorrect length'ps' must be at least 'p''quantile' must be at least %d'quantile' must be at most %d'rlm' failed to converge in %d steps'start' is not of the correct length'start' must be a named list'start' specifies names which are not arguments to 'densfun''theta' must be given'theta' must be specified'uin' is too large to fit plot in'use.start' cannot be used with R's version of 'glm''x' and 'y' must have the same number of rows'x' contains missing or infinite values'x' has length zero'x' is not a matrix'x' is probably collinear'x' is singular: singular fits are not implemented in 'rlm''x' must be a non-empty numeric vector'y' must be a matrix0 df terms are changing AICAIC is -infinity for this model, so 'stepAIC' cannot proceedAIC is not defined for this model, so 'stepAIC' cannot proceedAIC undefined for REML fitExponential values must be >= 0F test assumes 'quasi%s' familyInfs not allowed in 'd'Initial estimate: %sInitial fit:Initial value for 'theta': %fIter. 
%d Alpha: %sParameter:Probable convergence failureRe-fitting to get HessianResponse variable must be positive after additionsWaiting for profiling to be done...Weibull values must be > 0all frequencies are zeroall variables must be factorsalternation limit reachedan initial configuration must be supplied if there are NAs in 'd'an initial configuration must be supplied with NA/Infs in 'd'an intercept is needed and assumedanova is not implemented for a single "polr" objectat least %d cases are neededat least 3 distinct 'x' values are neededat least one column has IQR 0attempt to find suitable starting values failedbandwidths must be strictly positivebiplot is only possible if nf >= 2cannot estimate scale: MAD is zero for this samplecannot have leave-one-out CV with 'newdata'cannot use leave-one-out CV with method %scolumns of 'contrast.obj' must define a contrast (sum to zero)data vectors must be the same lengthdesign appears to be rank-deficient, so dropping some coefsdim(W) is not correctdistances must be result of 'dist' or a square matrixdowneach element of '%s' must be logicalempty row or column in tableestimate truncated at zeroextra arguments discardedfactors in 'newdata' do not match those for 'object'formula specifies no responsefrequency table is %d-dimensionalgamma values must be >= 0group %s is emptygroups %s are emptygroup means are numerically identicalhigher-way table requested. 
Only 2-way allowedincompatible argumentsinfinite, NA or NaN values in 'x'initial configuration must be completeinvalid 'prior'invalid 'test.vec'invalid initial configurationinvalid length(d)invalid number of levelsinvalid row(x)invalid sizeinvalid table specificationiteration %diteration limit reachediteration limit reached near 'x = %f'length of 'weights' must equal number of observationslength of 'wt' must equal number of observationsminimum occurred at one end of the rangemissing or infinite values are not allowedmissing or infinite values in 'x'missing or infinite values in the data are not allowedmissing values are not allowedmodels were not all fitted to the same size of datasetneed positive values to fit a log-Normalnegative 'weights' valuenegative or non-integer entries in tablenegative values not allowed for the negative binomial familynegative weights not allowedno 'addterm' method implemented for "mlm" modelsno positive weightsno solution in the specified range of bandwidthsno terms in scopeno terms in scope for adding to objectnot a "polr" objectnot all objects are of class "negbin"not all objects are of class "polr"not enough non-missing datanrow(x) and length(grouping) are differentnumber of rows in use has changed: remove missing values?object not of class "lda"object not of class "qda"only %d set, so all sets will be triedonly %d sets, so all sets will be triedonly 'REML = FALSE' is implementedonly Chi-squared LR tests are implementedonly finite values are allowed in 'lims'optimization failedpackage 'nlme' is essentialprofiling has found a better solution, so original fit had not convergedrank = 0: variables are numerically constantrank deficiency in group %sregression apparently linearresponse must be a factorresponse must have 3 or more levelsresponse variable must be positivescope is not a subset of term labelssome group is too small for 'qda'some of ... 
do not matchsupplying pars for the %s distribution is not supportedtests made without re-estimating 'theta'the contrast defined is empty (has no TRUE elements)theta =theta.ml: itertrying + %strying - %suneven breaks with 'prob = FALSE' will give a misleading plotunimplemented method: %sunsupported distributionupusing the %d/%d row from a combined fitusing the %d/%d rows from a combined fitvariable %s appears to be constant within groupsvariables %s appear to be constant within groupsvariable names in 'newdata' do not match those in 'object'variables are collinearweighted fits are not supportedwrong number of variableszero or negative distance between objects %d and %dProject-Id-Version: MASS 7.3-30 Report-Msgid-Bugs-To: bugs@R-project.org POT-Creation-Date: 2013-03-18 09:49 PO-Revision-Date: 2014-03-24 17:44+0100 Last-Translator: Åukasz Daniel Language-Team: Åukasz Daniel Language: pl_PL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Plural-Forms: nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 
1 : 2); X-Poedit-SourceCharset: iso-8859-1 X-Generator: Poedit 1.5.4 połączenie "%s" nie jest dostÄ™pne dla rodziny rozkÅ‚adu Pascala; dostÄ™pne połączenia to "identity", "log" oraz "sqrt"brakuje atrybutu "gradient"brakuje atrybutu "hessian"%d brakujÄ…ca obserwacja zostaÅ‚a usuniÄ™ta%d brakujÄ…ce obserwacje zostaÅ‚y usuniÄ™te%d brakujÄ…cych obserwacji zostaÅ‚o usuniÄ™tych%d wiersz z zerowymi wagami nie zostaÅ‚ zliczony%d wiersze z zerowymi wagami nie zostaÅ‚y zliczone%d wierszy z zerowymi wagami nie zostaÅ‚o zliczonych%s nie posiada obydwu komponentów 'qr' oraz 'y'argument 'Sigma' nie jest dodatnio okreÅ›lonaargument 'W' nie jest dodatnio okreÅ›lonyargument 'X' musi być macierzÄ… liczbowÄ… lub zespolonÄ…funkcja 'anova()' nie jest dostÄ™pna dla dopasowaÅ„ PQLargument 'breaks' nie pokrywa danychargument 'breaks' musi być Å›ciÅ›le rosnÄ…cy'c' musi być co najmniej 1.548; wartość zostaÅ‚a zignorowaneargument 'center' nie posiada poprawnej dÅ‚ugoÅ›ci'coef' musi definiować kontrast, tzn. sumować siÄ™ do 0'coef' musi mieć tÄ™ samÄ… dÅ‚ugość jak 'contrast.obj''data' nie posiada atrybutu 'terms'argument 'densfun' musi być dostarczony jako funkcja lub nazwa'dropterm' nie jest zaimplementowane dla dopasowaÅ„ "mlm"argument 'family' nie zostaÅ‚ rozpoznanyargument 'h' musi być Å›ciÅ›le dodatnimetoda 'init()' jest nieznana'lqs' nie powiodÅ‚o siÄ™: wszystkie próby byÅ‚y osobliwe'method' jest nieznaneargument 'nbins' musi być dodatniÄ… liczbÄ… caÅ‚kowitÄ…argument 'newdata' nie posiada poprawnej dÅ‚ugoÅ›ciargument 'nu' musi przekraczać 2argument 'object' nie posiada komponentu 'call'. 
Aktualizacja nie jest możliwaargument 'prior' posiada niepoprawnÄ… dÅ‚ugość'ps' musi być co najmniej 'p''quantile' musi być co najmniej %d'quantile' musi być najwyżej %d'rlm' nie uzbieżniÅ‚ siÄ™ w %d krokachargument 'start' nie posiada poprawnej dÅ‚ugoÅ›ci'start' musi być nazwanÄ… listÄ…'start' okreÅ›la nazwy, które nie sÄ… argumentami przekazywanymi do 'densfun'argument 'theta' musi być podanyargument 'theta' musi być okreÅ›lonyargument 'uin' jest zbyt duży aby dopasować wykresargument 'use.start' nie może być użyty z wersjÄ… R 'glm''x' oraz 'y' muszÄ… mieć tÄ™ samÄ… liczbÄ™ wierszyargument 'x' zawiera brakujÄ…ce lub nieskoÅ„czone wartoÅ›ciargument 'x' posiada zerowÄ… dÅ‚ugośćargument 'x' nie jest macierzÄ…argument 'x' jest najprawdopodobniej współliniowy'x' jest osobliwe: osobliwe dopasowania nie sÄ… zaimplementowane w 'rlm'argument 'x' musi być niepustym wektorem liczbowymargument 'y' musi być macierzÄ…0 różniczkowych czÅ‚onów zmienia AICAIC wynosi minus nieskoÅ„czoność dla tego modelu, tak wiÄ™c 'stepAIC' nie może kontynuowaćAIC nie jest zdefiniowane dla tego modelu, wiÄ™c 'stepAIC' nie może kontynuowaćAIC nie jest zdefiniowane dla dopasowania REMLwartoÅ›ci eksponencjalne muszÄ… być >= 0test Fishera zakÅ‚ada rodzinÄ™ 'kwazi-%s'NieskoÅ„czonoÅ›ci nie sÄ… dozwolone w 'd'PoczÄ…tkowe oszacowanie: %sPoczÄ…tkowe dopasowanie:PoczÄ…tkowa wartość dla 'theta': %fIteracja %d Alpha: %sParametr:Prawdopodobne niepowodzenie zbieżnoÅ›ciPonowne dopasowywanie aby uzyskać hesjanZmienna zależna musi być dodatniÄ… po wykonanych uzupeÅ‚nieniachOczekiwanie na wykonanie profilowania...wartoÅ›ci Weibulla muszÄ… być > 0wszystkie czÄ™stotliwoÅ›ci wynoszÄ… zerowszystkie zmienne muszÄ… być czynnikamiosiÄ…gniÄ™to limit zmianpoczÄ…tkowa konfiguracja musi zostać dostarczona jeÅ›li sÄ… wartoÅ›ci Na w 'd'poczÄ…tkowa konfiguracja musi zostać dostarczona z wartosciami NA/Inf w 'd'przeciÄ™cie jest potrzebne oraz jest zakÅ‚adanefunkcja 'anova()' nie jest zaimplementowana dla pojedynczego obiektu 
klasy "polr"co najmniej %d przypadków jest potrzebnychco najmniej 3 różne wartoÅ›ci 'x' sÄ… potrzebneco najmniej jedna kolumna ma IQR równe 0próba znalezienia odpowiednich wartoÅ›ci startowych nie powiodÅ‚a siÄ™szerokoÅ›ci pasma muszÄ… być Å›ciÅ›cle dodatniebiplot jest możliwy jeÅ›li 'nf >= 2'nie można oszacować skali: MAD wynosi zero dla tej próbynie można mieć 'zostaw-jeden' CV z 'newdata'nie można użyć 'zostaw-jeden' CV z metodÄ… %skolumny 'contrast.obj' muszÄ… okreÅ›lać konstrast (sumować siÄ™ do zera)wektory danych muszÄ… być tej samej dÅ‚ugoÅ›ciprojekt wydaje siÄ™ mieć deficyt rang, wiÄ™c usuwam niektóre współczynniki'dim(W)' nie jest poprawneodlegÅ‚oÅ›ci muszÄ… być wynikiem 'dist' lub być kwadratowÄ… macierzÄ…downkażdy element '%s' musi być typem logicznympusty wiersz lub kolumna w tablicyoszacowanie zostaÅ‚o przyciÄ™te w zerzedodatkowe argumenty zostaÅ‚y odrzuconeczynniki w 'newdata' nie zgadzajÄ… siÄ™ z tymi dla 'object'formuÅ‚a nie okreÅ›la zmiennej zależnejtabela czÄ™stotliwoÅ›ci jest %d-wymiarowawartoÅ›ci 'gamma' muszÄ… być >= 0grupa %s jest pustagrupy %s sÄ… pustegrupy %s sÄ… pusteÅ›rednie grup sÄ… liczbowo identycznezażądano tabeli o wyższej kierunkowoÅ›ci. 
Tylko 2-kierunkowa jest dozwolonaniezgodne argumentywartoÅ›ci nieskoÅ„czone, NA lub NaN w 'x'poczÄ…tkowa konfiguracja musi być kompletnaniepoprawny argument 'prior'niepoprawny argument 'test.vec'niepoprawna poczÄ…tkowa konfiguracjaniepoprawne 'length(d)'niepoprawna liczba poziomówniepoprawne 'row(x)'niepoprawny rozmiarniepoprawne okreÅ›lenie tabeliiteracja %dosiÄ…gniÄ™to graniczÄ… wartość iteracjiosiÄ…gniÄ™to limit iteracji w pobliżu 'x = %f'dÅ‚ugość 'weights' musi równać siÄ™ liczbie obserwacjidÅ‚ugość 'wt' musi równać siÄ™ liczbie obserwacjipojawiÅ‚a siÄ™ wartość minimum na jednym z koÅ„ców zakresuwartoÅ›ci brakujÄ…ce lub nieskoÅ„czone nie sÄ… dozwolonebrakujÄ…ce lub nieskoÅ„czone wartoÅ›ci w 'x'wartoÅ›ci brakujÄ…ce lub nieskoÅ„czone nie sÄ… dozwolone w zbiorze danychbrakujÄ…ce wartoÅ›ci nie sÄ… dozwolonenie wszystkie modele zostaÅ‚y dopasowane do zbioru danych tego samego rozmiarupotrzeba dodatnich wartoÅ›ci aby dopasować rozkÅ‚ad logarytmicznie normalnyujemna wartość 'weights'ujemne lub niecaÅ‚kowite wpisy w tabeliujemne wartoÅ›ci nie sÄ… dozwolone dla rodziny rozkÅ‚adu Pascalaujemne wagi nie sÄ… dozwolonebrak zaimplementowanej metody 'addterm' dla modeli "mlm"brak dodatnich wagbrak rozwiÄ…zania we wskazanym zakresie pasmbrak czÅ‚onów w zakresiebrak czÅ‚onów w zakresie do dodania do obiektuto nie jest obiekt "polr"nie wszystkie obiekt sÄ… klasy "negbin"nie wszystkie obiekty sÄ… klasy "polr"niewystarczajÄ…ca liczba niebrakujÄ…cych danych'nrow(x)' oraz 'length(grouping)' sÄ… różneliczba wierszy w użyciu zmieniÅ‚a siÄ™: usunąć brakujÄ…ce wartoÅ›ci?obiekt nie jest klasy "lda"obiekt nie jest klasy "qda"tylko %d zbiór, wiÄ™c wszystkie zbiory zostanÄ… sprawdzonetylko %d zbiory, wiÄ™c wszystkie zbiory zostanÄ… sprawdzonetylko %d zbiorów, wiÄ™c wszystkie zbiory zostanÄ… sprawdzonetylko 'REML = FALSE' jest zaimplementowanejedynie testy Chi-kwadrat regresji liniowej sÄ… zaimplementowanetylko skoÅ„czone wartoÅ›ci sÄ… dozwolone w 'lims'optymalizacja nie powiodÅ‚a siÄ™pakiet 
'nlme' jest kluczowyprofilowanie znalazÅ‚o lepsze rozwiÄ…zanie, tak wiÄ™c oryginalne dopasowanie nie uzbieżniÅ‚o siÄ™rank = 0: zmienne sÄ… liczbowo staÅ‚edeficyt rang w grupie %sregresja jest najwyraźniej liniowazmienna zależna musi być czynnikiemzmienna zależna musi mieć 3 lub wiÄ™cej poziomówzmienna zależna musi być dodatniazakres nie jest podzbiorem etykiet czÅ‚onówniektóre grupy sÄ… zbyt maÅ‚e dla 'qda'niektóre z '...' nie pasujÄ…dostarczanie parametrów dla rozkÅ‚adu %s nie jest wspieranetesty wykonano bez ponownego oszacowania 'theta'zdefiniowany kontrast jest pusty (nie ma PRAWDZIWYCH elementów)theta =theta.ml: iteracjapróbowanie + %spróbowanie - %snieparzyste przerwy z 'prob = FALSE' dadzÄ… mylny wykresniezaimplementowana metoda: %sniewspierany rozkÅ‚adupużywanie %d/%d wiersza z połączonego dopasowaniaużywanie %d/%d wierszy z połączonego dopasowaniaużywanie %d/%d wierszy z połączonego dopasowaniazmienna %s wyglÄ…da na stałą wewnÄ…trz grupzmienne %s wyglÄ…dajÄ… na staÅ‚e wewnÄ…trz grupzmienne %s wyglÄ…dajÄ… na staÅ‚e wewnÄ…trz grupnazwy zmiennych w 'newdata' nie zgadzajÄ… siÄ™ z tymi w 'object'zmienne sÄ… współlinioweważone dopasowania nie sÄ… wspieraneniepoprawna liczba zmiennychzerowa lub ujemna odlegÅ‚ość pomiÄ™dzy obiektami %d oraz %dMASS/inst/po/en@quot/0000755000176000001440000000000011754561330014060 5ustar ripleyusersMASS/inst/po/en@quot/LC_MESSAGES/0000755000176000001440000000000011772553043015647 5ustar ripleyusersMASS/inst/po/en@quot/LC_MESSAGES/R-MASS.mo0000644000176000001440000004106612315222153017142 0ustar ripleyusersÞ•­„éì f‘ø>1Jp-» é ''%Ou$”/¹ é- .8g0‡)¸âú+1])q$›À8Ó +Ed$‚$§Ì<é&<!V4x-­'Û+;E&¨½<Ù>Up°È Ýê &C2]#´ÏèA =b" 3Ã÷)>/\$Œ"±2Ô+*3>^$;Âþ5J$Ot‘¬4Æû!;%U%{/¡Ñ!è& 1ATr„ ¬¹ Õâ%ú5 0V(‡*°!Û6ý46S(г(Ì<õ2 0O € 0” Å &× þ %!#8!\!*x!9£!Ý!÷!N""`")ƒ"(­"Ö"ê"H#,O#|#˜#µ##Ï#"ó#$$!;$]$7v$(®$4×$ %% #% /%=;%y%’%«%P®%aÿ%:a&œ&´&Ô&3î&6"'fY(À(Ý(>ù(J8)5ƒ)$¹) Þ)+ÿ))+*"U*(x*3¡*$Õ*1ú*6,+'c+4‹+-À+î+! 
,,,/I,y,-‘,(¿,è,@ÿ,"@-!c-"…-!¨-(Ê-(ó- .D=.‚.œ.%º.<à.5/+S//—/¯/CÍ/*0<0U0@q0B²0õ01#01T1p1 …1!’1´1 Ç1Ò1ï12 2#<2`2{2”2²2EÌ2A3"T33w3«3-È3ö3/4$D4"i42Œ4/¿4*ï4B5$]5;‚5¾59Ô56(6<6Y6t6<Ž6Ë6!é6 7%%7%K7/q7¡7%¸7&Þ78808N8`8y8 ˆ8•8 ±8¾8)Ö8994:9(o9*˜9%Ã96é9 :6?:(v:Ÿ:(¼:<å:";4?;t;0ˆ;¹;&Ë;ò;%<#,<P<*l<9—<Ñ<ë<N=&T=){=,¥=Ò=æ=H>,O>|>˜>µ>#Ï>"ó>$?%;?a?7z?,²?4ß?@@ +@ 7@AC@…@ž@·@Pº@a ABmA°AÈAèA3B9‘¨xhªO;lƒT„ac_H0 (R’ž)«N‚%{#"s UfoЉ€”¬p¥W­ŸMYˆ!¤2yn|r‹kZ†6©X4=›-—<u>3q¦^]*™Œ@D}b `LIŽœ,Q~‡KS“•v1t GdeA/§7mšJ –…j¢'Cz£EwF˜5.Pig :?\8&B$[¡ +V"%s" link not available for negative binomial family; available links are "identity", "log" and "sqrt""gradient" attribute missing"hessian" attribute missing%d missing observation deleted%d missing observations deleted%d row with zero weights not counted%d rows with zero weights not counted%s does not have both 'qr' and 'y' components'Sigma' is not positive definite'W' is not positive definite'X' must be a numeric or complex matrix'anova' is not available for PQL fits'breaks' do not cover the data'breaks' must be strictly increasing'c' must be at least 1.548 and has been ignored'center' is not the right length'coef' must define a contrast, i.e., sum to 0'coef' must have same length as 'contrast.obj''data' has no 'terms' attribute'densfun' must be supplied as a function or name'dropterm' not implemented for "mlm" fits'family' not recognized'h' must be strictly positive'init' method is unknown'lqs' failed: all the samples were singular'method' is unknown'nbins' must result in a positive integer'newdata' is not of the right length'nu' must exceed 2'object' has no 'call' component. 
Updating not possible'prior' is of incorrect length'ps' must be at least 'p''quantile' must be at least %d'quantile' must be at most %d'rlm' failed to converge in %d steps'start' is not of the correct length'start' must be a named list'start' specifies names which are not arguments to 'densfun''theta' must be given'theta' must be specified'uin' is too large to fit plot in'use.start' cannot be used with R's version of 'glm''x' and 'y' must have the same number of rows'x' contains missing or infinite values'x' has length zero'x' is not a matrix'x' is probably collinear'x' is singular: singular fits are not implemented in 'rlm''x' must be a non-empty numeric vector'y' must be a matrix0 df terms are changing AICAIC is -infinity for this model, so 'stepAIC' cannot proceedAIC is not defined for this model, so 'stepAIC' cannot proceedAIC undefined for REML fitExponential values must be >= 0F test assumes 'quasi%s' familyInfs not allowed in 'd'Initial estimate: %sInitial fit:Initial value for 'theta': %fIter. 
%d Alpha: %sParameter:Probable convergence failureRe-fitting to get HessianResponse variable must be positive after additionsWaiting for profiling to be done...Weibull values must be > 0all frequencies are zeroall variables must be factorsalternation limit reachedan initial configuration must be supplied if there are NAs in 'd'an initial configuration must be supplied with NA/Infs in 'd'an intercept is needed and assumedanova is not implemented for a single "polr" objectat least %d cases are neededat least 3 distinct 'x' values are neededat least one column has IQR 0attempt to find suitable starting values failedbandwidths must be strictly positivebiplot is only possible if nf >= 2cannot estimate scale: MAD is zero for this samplecannot have leave-one-out CV with 'newdata'cannot use leave-one-out CV with method %scolumns of 'contrast.obj' must define a contrast (sum to zero)data vectors must be the same lengthdesign appears to be rank-deficient, so dropping some coefsdim(W) is not correctdistances must be result of 'dist' or a square matrixdowneach element of '%s' must be logicalempty row or column in tableestimate truncated at zeroextra arguments discardedfactors in 'newdata' do not match those for 'object'formula specifies no responsefrequency table is %d-dimensionalgamma values must be >= 0group %s is emptygroups %s are emptygroup means are numerically identicalhigher-way table requested. 
Only 2-way allowedincompatible argumentsinfinite, NA or NaN values in 'x'initial configuration must be completeinvalid 'prior'invalid 'test.vec'invalid initial configurationinvalid length(d)invalid number of levelsinvalid row(x)invalid sizeinvalid table specificationiteration %diteration limit reachediteration limit reached near 'x = %f'length of 'weights' must equal number of observationslength of 'wt' must equal number of observationsminimum occurred at one end of the rangemissing or infinite values are not allowedmissing or infinite values in 'x'missing or infinite values in the data are not allowedmissing values are not allowedmodels were not all fitted to the same size of datasetneed positive values to fit a log-Normalnegative 'weights' valuenegative or non-integer entries in tablenegative values not allowed for the negative binomial familynegative weights not allowedno 'addterm' method implemented for "mlm" modelsno positive weightsno solution in the specified range of bandwidthsno terms in scopeno terms in scope for adding to objectnot a "polr" objectnot all objects are of class "negbin"not all objects are of class "polr"not enough non-missing datanrow(x) and length(grouping) are differentnumber of rows in use has changed: remove missing values?object not of class "lda"object not of class "qda"only %d set, so all sets will be triedonly %d sets, so all sets will be triedonly 'REML = FALSE' is implementedonly Chi-squared LR tests are implementedonly finite values are allowed in 'lims'optimization failedpackage 'nlme' is essentialprofiling has found a better solution, so original fit had not convergedrank = 0: variables are numerically constantrank deficiency in group %sregression apparently linearresponse must be a factorresponse must have 3 or more levelsresponse variable must be positivescope is not a subset of term labelssome group is too small for 'qda'some of ... 
do not matchsupplying pars for the %s distribution is not supportedtests made without re-estimating 'theta'the contrast defined is empty (has no TRUE elements)theta =theta.ml: itertrying + %strying - %suneven breaks with 'prob = FALSE' will give a misleading plotunimplemented method: %sunsupported distributionupusing the %d/%d row from a combined fitusing the %d/%d rows from a combined fitvariable %s appears to be constant within groupsvariables %s appear to be constant within groupsvariable names in 'newdata' do not match those in 'object'variables are collinearweighted fits are not supportedwrong number of variableszero or negative distance between objects %d and %dProject-Id-Version: MASS 7.3-30 POT-Creation-Date: 2014-03-18 14:00 PO-Revision-Date: 2014-03-18 14:00 Last-Translator: Automatically generated Language-Team: none MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Language: en Plural-Forms: nplurals=2; plural=(n != 1); "%s" link not available for negative binomial family; available links are "identity", "log" and "sqrt""gradient" attribute missing"hessian" attribute missing%d missing observation deleted%d missing observations deleted%d row with zero weights not counted%d rows with zero weights not counted%s does not have both ‘qr’ and ‘y’ components‘Sigma’ is not positive definite‘W’ is not positive definite‘X’ must be a numeric or complex matrix‘anova’ is not available for PQL fits‘breaks’ do not cover the data‘breaks’ must be strictly increasing‘c’ must be at least 1.548 and has been ignored‘center’ is not the right length‘coef’ must define a contrast, i.e., sum to 0‘coef’ must have same length as ‘contrast.obj’‘data’ has no ‘terms’ attribute‘densfun’ must be supplied as a function or name‘dropterm’ not implemented for "mlm" fits‘family’ not recognized‘h’ must be strictly positive‘init’ method is unknown‘lqs’ failed: all the samples were singular‘method’ is unknown‘nbins’ must result in a positive 
integer‘newdata’ is not of the right length‘nu’ must exceed 2‘object’ has no ‘call’ component. Updating not possible‘prior’ is of incorrect length‘ps’ must be at least ‘p’‘quantile’ must be at least %d‘quantile’ must be at most %d‘rlm’ failed to converge in %d steps‘start’ is not of the correct length‘start’ must be a named list‘start’ specifies names which are not arguments to ‘densfun’‘theta’ must be given‘theta’ must be specified‘uin’ is too large to fit plot in‘use.start’ cannot be used with R's version of ‘glm’‘x’ and ‘y’ must have the same number of rows‘x’ contains missing or infinite values‘x’ has length zero‘x’ is not a matrix‘x’ is probably collinear‘x’ is singular: singular fits are not implemented in ‘rlm’‘x’ must be a non-empty numeric vector‘y’ must be a matrix0 df terms are changing AICAIC is -infinity for this model, so ‘stepAIC’ cannot proceedAIC is not defined for this model, so ‘stepAIC’ cannot proceedAIC undefined for REML fitExponential values must be >= 0F test assumes ‘quasi%s’ familyInfs not allowed in ‘d’Initial estimate: %sInitial fit:Initial value for ‘theta’: %fIter. 
%d Alpha: %sParameter:Probable convergence failureRe-fitting to get HessianResponse variable must be positive after additionsWaiting for profiling to be done...Weibull values must be > 0all frequencies are zeroall variables must be factorsalternation limit reachedan initial configuration must be supplied if there are NAs in ‘d’an initial configuration must be supplied with NA/Infs in ‘d’an intercept is needed and assumedanova is not implemented for a single "polr" objectat least %d cases are neededat least 3 distinct ‘x’ values are neededat least one column has IQR 0attempt to find suitable starting values failedbandwidths must be strictly positivebiplot is only possible if nf >= 2cannot estimate scale: MAD is zero for this samplecannot have leave-one-out CV with ‘newdata’cannot use leave-one-out CV with method %scolumns of ‘contrast.obj’ must define a contrast (sum to zero)data vectors must be the same lengthdesign appears to be rank-deficient, so dropping some coefsdim(W) is not correctdistances must be result of ‘dist’ or a square matrixdowneach element of ‘%s’ must be logicalempty row or column in tableestimate truncated at zeroextra arguments discardedfactors in ‘newdata’ do not match those for ‘object’formula specifies no responsefrequency table is %d-dimensionalgamma values must be >= 0group %s is emptygroups %s are emptygroup means are numerically identicalhigher-way table requested. 
Only 2-way allowedincompatible argumentsinfinite, NA or NaN values in ‘x’initial configuration must be completeinvalid ‘prior’invalid ‘test.vec’invalid initial configurationinvalid length(d)invalid number of levelsinvalid row(x)invalid sizeinvalid table specificationiteration %diteration limit reachediteration limit reached near ‘x = %f’length of ‘weights’ must equal number of observationslength of ‘wt’ must equal number of observationsminimum occurred at one end of the rangemissing or infinite values are not allowedmissing or infinite values in ‘x’missing or infinite values in the data are not allowedmissing values are not allowedmodels were not all fitted to the same size of datasetneed positive values to fit a log-Normalnegative ‘weights’ valuenegative or non-integer entries in tablenegative values not allowed for the negative binomial familynegative weights not allowedno ‘addterm’ method implemented for "mlm" modelsno positive weightsno solution in the specified range of bandwidthsno terms in scopeno terms in scope for adding to objectnot a "polr" objectnot all objects are of class "negbin"not all objects are of class "polr"not enough non-missing datanrow(x) and length(grouping) are differentnumber of rows in use has changed: remove missing values?object not of class "lda"object not of class "qda"only %d set, so all sets will be triedonly %d sets, so all sets will be triedonly ‘REML = FALSE’ is implementedonly Chi-squared LR tests are implementedonly finite values are allowed in ‘lims’optimization failedpackage ‘nlme’ is essentialprofiling has found a better solution, so original fit had not convergedrank = 0: variables are numerically constantrank deficiency in group %sregression apparently linearresponse must be a factorresponse must have 3 or more levelsresponse variable must be positivescope is not a subset of term labelssome group is too small for ‘qda’some of ... 
do not matchsupplying pars for the %s distribution is not supportedtests made without re-estimating ‘theta’the contrast defined is empty (has no TRUE elements)theta =theta.ml: itertrying + %strying - %suneven breaks with ‘prob = FALSE’ will give a misleading plotunimplemented method: %sunsupported distributionupusing the %d/%d row from a combined fitusing the %d/%d rows from a combined fitvariable %s appears to be constant within groupsvariables %s appear to be constant within groupsvariable names in ‘newdata’ do not match those in ‘object’variables are collinearweighted fits are not supportedwrong number of variableszero or negative distance between objects %d and %dMASS/inst/po/ko/0000755000176000001440000000000012121561422013045 5ustar ripleyusersMASS/inst/po/ko/LC_MESSAGES/0000755000176000001440000000000012121561422014632 5ustar ripleyusersMASS/inst/po/ko/LC_MESSAGES/R-MASS.mo0000644000176000001440000003123112315222153016131 0ustar ripleyusersÞ•q¤—,ˆ ‰ ¦ J . 'K %s $™ ¾ -ß . < 0\  ¥ à Ü )ð $ ? 
8R ‹ $ª Ï ì  4 -Q ' § » &Ï ö < >H‡¢Ââú -#8\wA®3ð)$N/l>œ$Û5$Lq4Ž!Ãå%ÿ!%GWjˆš³ ÂÏë509(j*“!¾6à(6<_œ0¹0ê-%A#g*‹9¶ð "$)G(qš®HÊ#-"Q!t–(¯Øàï:!\t”3®Øâ'»&ã@ ,K(xH¡8ê3#/WK‡JÓ.KM&™-À(î I80‚*³fÞ*E.p2Ÿ+Ò.þA- <o E¬ ò !H,!*u!a !^"/a"E‘"E×"'#.E#2t# §#<µ#5ò#!($0J$J{$AÆ$5%0>%0o%S %>ô%#3&PW&<¨&2å&Z's'C'3Ô'3($<('a(*‰(&´()Û(#)#))-M)&{)K¢)Fî)/5*>e*<¤*Ká*)-+LW+L¤+>ñ+H0,>y,&¸,ß,6ý,44-2i-Oœ-)ì-).8@.Ey.:¿.ú.&/rA//´/Qä/360-j0*˜0QÃ011*,1#W1X{1,Ô1*2,,2?Y2_Se5IqR3 ^<:!GWlOYBbJ.)nj#imHU-&h6fgo?]aN@L V=7%pZ/*Q dE+A (M;`F0,[' $PT>29CK"D41k8 c\X"gradient" attribute missing"hessian" attribute missing%d row with zero weights not counted%d rows with zero weights not counted'Sigma' is not positive definite'W' is not positive definite'X' must be a numeric or complex matrix'anova' is not available for PQL fits'breaks' must be strictly increasing'center' is not the right length'coef' must define a contrast, i.e., sum to 0'coef' must have same length as 'contrast.obj''data' has no 'terms' attribute'densfun' must be supplied as a function or name'family' not recognized'h' must be strictly positive'init' method is unknown'method' is unknown'nbins' must result in a positive integer'newdata' is not of the right length'nu' must exceed 2'object' has no 'call' component. 
Updating not possible'prior' is of incorrect length'start' is not of the correct length'start' must be a named list'theta' must be given'theta' must be specified'use.start' cannot be used with R's version of 'glm''x' and 'y' must have the same number of rows'x' contains missing or infinite values'x' has length zero'x' is not a matrix'x' must be a non-empty numeric vector'y' must be a matrixAIC is -infinity for this model, so 'stepAIC' cannot proceedAIC is not defined for this model, so 'stepAIC' cannot proceedAIC undefined for REML fitExponential values must be >= 0F test assumes 'quasi%s' familyInfs not allowed in 'd'Initial estimate: %sInitial value for 'theta': %fParameter:Waiting for profiling to be done...Weibull values must be > 0all frequencies are zeroall variables must be factorsan initial configuration must be supplied if there are NAs in 'd'anova is not implemented for a single "polr" objectat least 3 distinct 'x' values are neededat least one column has IQR 0attempt to find suitable starting values failedcolumns of 'contrast.obj' must define a contrast (sum to zero)data vectors must be the same lengthdim(W) is not correctdistances must be result of 'dist' or a square matrixeach element of '%s' must be logicalempty row or column in tablefactors in 'newdata' do not match those for 'object'frequency table is %d-dimensionalgamma values must be >= 0group means are numerically identicalinfinite, NA or NaN values in 'x'invalid 'prior'invalid 'test.vec'invalid initial configurationinvalid length(d)invalid number of levelsinvalid row(x)invalid sizeinvalid table specificationiteration limit reachedlength of 'weights' must equal number of observationslength of 'wt' must equal number of observationsminimum occurred at one end of the rangemissing or infinite values are not allowedmissing or infinite values in 'x'missing or infinite values in the data are not allowedmissing values are not allowedneed positive values to fit a log-Normalnegative values not allowed for 
the negative binomial familynegative weights not allowedno 'addterm' method implemented for "mlm" modelsno solution in the specified range of bandwidthsno terms in scopenot a "polr" objectnot all objects are of class "negbin"not all objects are of class "polr"nrow(x) and length(grouping) are differentnumber of rows in use has changed: remove missing values?object not of class "lda"object not of class "qda"only 'REML = FALSE' is implementedonly Chi-squared LR tests are implementedonly finite values are allowed in 'lims'optimization failedpackage 'nlme' is essentialprofiling has found a better solution, so original fit had not convergedresponse must be a factorresponse must have 3 or more levelsresponse variable must be positivesome group is too small for 'qda'some of ... do not matchtests made without re-estimating 'theta'theta =theta.ml: iterunimplemented method: %sunsupported distributionvariable names in 'newdata' do not match those in 'object'variables are collinearweighted fits are not supportedwrong number of variableszero or negative distance between objects %d and %dProject-Id-Version: MASS 7.3-22 POT-Creation-Date: 2013-03-18 09:49 PO-Revision-Date: 2013-03-11 13:47-0600 Last-Translator: Chel Hee Lee Language-Team: R Development Translation Teams (Korean) Language: ko MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Plural-Forms: nplurals=1; plural=0; X-Poedit-Language: Korean X-Poedit-Country: KOREA, REPUBLIC OF X-Poedit-SourceCharset: utf-8 "gradient" ì†ì„±ì´ 빠져있습니다"hessian" ì†ì„±ì´ 빠져있습니다가중치를 가지지 않는 %dí–‰ì€ ì„¸ì–´ì§€ì§€ 않습니다'Sigma'는 positive definiteê°€ 아닙니다'W'는 positive definiteê°€ 아닙니다'X'는 반드시 수치형 ë˜ëŠ” 복소수형 행렬ì´ì–´ì•¼ 합니다PQL ì í•©ì— 'anova'는 사용가능하지 않습니다'breaks'는 반드시 ì¦ê°€ë§Œì„ 해야 합니다'center'ì˜ ê¸¸ì´ê°€ 올바르지 않습니다'coef'는 반드시 í•©ì´ 0ì´ë˜ë„ë¡ contrast를 ì •ì˜í•´ì•¼ 합니다'coef'는 반드시 'contrast.obj'와 ê°™ì€ ê¸¸ì´ë¥¼ 가져야 합니다'data'는 'terms'ë¼ëŠ” ì†ì„±ì´ 없습니다'densfun'ì€ ë°˜ë“œì‹œ 함수 ë˜ëŠ” ì´ë¦„으로서 주어져야 합니다ì¸ì‹í•  수 없는 
'family'입니다'h'는 반드시 양수ì´ì–´ì•¼ë§Œ 합니다'init' 메소드를 알 수 없습니다'method'를 알 수 없습니다'nbins'는 반드시 ê²°ê³¼ì ìœ¼ë¡œ ì–‘ì˜ ì •ìˆ˜ë¥¼ 주어야 합니다'newdata'ì˜ ê¸¸ì´ê°€ 올바르지 않습니다'nu'는 반드시 2를 넘어야 합니다'object'ê°€ 'call' 요소를 가지고 있지 않습니다. 가능하지 ì•Šì€ ì—…ë°ì´íŠ¸ìž…ë‹ˆë‹¤'prior'ì˜ ê¸¸ì´ê°€ 잘못ë˜ì—ˆìŠµë‹ˆë‹¤'start'ì˜ ê¸¸ì´ê°€ 올바르지 않습니다'start'는 반드시 named listì´ì–´ì•¼ 합니다'theta'는 반드시 주어져야 합니다'theta'는 반드시 지정ë˜ì–´ì•¼ 합니다'use.start'는 Rì˜ 'glm'버전과 함께 ì“°ì¼ ìˆ˜ 없습니다'x'와 'y'는 반드시 í–‰ì˜ ê°œìˆ˜ì™€ 같아야 합니다'x'는 결측값 ë˜ëŠ” 무한한 ê°’ë“¤ì„ í¬í•¨í•˜ê³  있습니다'x'ì˜ ê¸¸ì´ê°€ 0입니다'x'는 í–‰ë ¬ì´ ì•„ë‹™ë‹ˆë‹¤'x'는 반드시 비어있지 ì•Šì€ ìˆ˜ì¹˜í˜• 벡터ì´ì–´ì•¼ 합니다'y'는 반드시 행렬ì´ì–´ì•¼ í•©ë‹ˆë‹¤ì´ ëª¨ë¸ì— 대한 AICê°€ ìŒì˜ ë¬´í•œê°’ì„ ê°€ì§€ë¯€ë¡œ 'stepAIC'를 구할 수 ì—†ìŠµë‹ˆë‹¤ì´ ëª¨ë¸ì— 대해서 AIC는 ì •ì˜ë˜ì–´ 있지 않아 'stepAIC'를 구할 수 없습니다REML ì í•©ì— ì •ì˜ë˜ì§€ ì•Šì€ AIC입니다Exponenital values는 반드시 0보다 í¬ê±°ë‚˜ 같아야 합니다F 테스트는 'quasi%s' 페밀리ë¼ëŠ” ê°€ì •í•˜ì— ìˆ˜í–‰ë©ë‹ˆë‹¤'d'ì— í—ˆìš©ë˜ì§€ 않는 Inf입니다초기 추정치는 다ìŒê³¼ 같습니다: %s다ìŒì€ 'theta'ì— ëŒ€í•œ 초기값입니다: %f파ë¼ë¯¸í„°:프로파ì¼ë§ì´ 완료ë˜ê¸¸ 기다리는 중입니다...Weibull values는 반드시 0 보다 커야 합니다모든 빈ë„ìˆ˜ë“¤ì´ 0입니다모든 ë³€ìˆ˜ë“¤ì´ ìš”ì¸í˜•ì´ì–´ì•¼ 합니다'd'ì— NAê°€ 있다면 ì´ˆê¸°ì„¤ì •ì´ ë°˜ë“œì‹œ 제공ë˜ì–´ì•¼ 합니다anova는 ë‹¨ì¼ "polr" ê°ì²´ì— 구현ë˜ì–´ 있지 않습니다ì ì–´ë„ 3ê°œì˜ ë‹¤ë¥¸ 'x'ê°’ë“¤ì´ í•„ìš”í•©ë‹ˆë‹¤ì ì–´ë„ í•˜ë‚˜ì˜ ì—´ì´ IQR 0ì„ ê°€ì§‘ë‹ˆë‹¤ì ì ˆí•œ 시작ì ë“¤ì„ 찾지 못했습니다'contrast.obj'ì˜ ì—´ë“¤ì€ í•©ì´ 0ì´ë˜ë„ë¡ contrast를 ì •ì˜í•´ì•¼ 합니다ë°ì´í„° ë²¡í„°ë“¤ì€ ë°˜ë“œì‹œ ê°™ì€ ê¸¸ì´ì—¬ì•¼ 합니다dim(W)ì´ ì •í™•í•˜ì§€ 않습니다distances는 반드시 'dist'ì˜ ê²°ê³¼ì´ê±°ë‚˜ 정방행렬ì´ì–´ì•¼ 합니다'%s'ì˜ ê° ìš”ì†ŒëŠ” 반드시 논리ì ì´ì–´ì•¼ 합니다테ì´ë¸”ì— í–‰ ë˜ëŠ” ì—´ì´ ë¹„ì–´ 있습니다'newdata'ì— ìžˆëŠ” ìš”ì¸ë“¤ì´ 'object'ì— ìžˆëŠ” ìš”ì¸ë“¤ê³¼ ì¼ì¹˜í•˜ì§€ 않습니다%d-ì°¨ì› ë¶„í• í‘œìž…ë‹ˆë‹¤gamma valuesë“¤ì€ ë°˜ë“œì‹œ 0 보다 í¬ê±°ë‚˜ 같아야 합니다그룹 í‰ê· ë“¤ì´ 수치ì ìœ¼ë¡œ ë™ì¼í•©ë‹ˆë‹¤'x'ì— ë¬´í•œí•œ ê°’, NA ë˜ëŠ” NaNì´ ìžˆìŠµë‹ˆë‹¤ìœ íš¨í•˜ì§€ ì•Šì€ 'prior'입니다유효하지 ì•Šì€ 'test.vec'입니다유효하지 ì•Šì€ ì´ˆê¸° 설정입니다유효하지 ì•Šì€ length(d)입니다유효하지 ì•Šì€ levelì˜ ìˆ˜ìž…ë‹ˆë‹¤ìœ íš¨í•˜ì§€ ì•Šì€ row(x)입니다유효하지 
ì•Šì€ í¬ê¸°ìž…니다유효하지 ì•Šì€ í…Œì´ë¸” 지정입니다iteration ì œí•œì— ë„달했습니다'weights'ì˜ ê¸¸ì´ëŠ” 반드시 ê´€ì¸¡ì¹˜ì˜ ê°œìˆ˜ì™€ 같아야 합니다'wt'ì˜ ê¸¸ì´ëŠ” 반드시 ê´€ì¸¡ì¹˜ì˜ ê°œìˆ˜ì™€ 같아야 합니다rangeì˜ ëì—서 ìµœì†Œê°’ì„ ì°¾ì•˜ìŠµë‹ˆë‹¤ê²°ì¸¡ê°’ ë˜ëŠ” 무한한 ê°’ë“¤ì€ í—ˆìš©ë˜ì§€ 않습니다'x'ì— ëˆ„ë½ëœ ê°’ ë˜ëŠ” 무한한 ê°’ë“¤ì´ ìžˆìŠµë‹ˆë‹¤ë°ì´í„°ì— 결측값 ë˜ëŠ” 무한한 ê°’ë“¤ì€ í—ˆìš©ë˜ì§€ ì•ŠìŠµë‹ˆë‹¤ê²°ì¸¡ê°’ë“¤ì€ í—ˆìš©ë˜ì§€ 않습니다log-Normalì„ ì í•©í•˜ê¸° 위해서는 ì–‘ì˜ ê°’ë“¤ì„ í•„ìš”ë¡œ 합니다negative binomial 페밀리ì—서 허용ë˜ì§€ 않는 ìŒì˜ 값들입니다ìŒì˜ ê°’ì„ ê°€ì§€ëŠ” 가중치는 허용ë˜ì§€ 않습니다"mlm"모ë¸ë“¤ì„ 위하여 êµ¬í˜„ëœ 'addterm'메소드가 아닙니다주어진 bandwidthsì˜ ë²”ìœ„ë‚´ì— ì†”ë£¨ì…˜ì´ ì—†ìŠµë‹ˆë‹¤scopeì— ì•„ë¬´ëŸ° í•­ì´ ì—†ìŠµë‹ˆë‹¤"polr" ê°ì²´ê°€ 아닙니다모든 ê°ì²´ë“¤ì´ í´ëž˜ìФ "negbin"는 아닙니다모든 ê°ì²´ë“¤ì´ í´ëž˜ìФ "polr"는 아닙니다nrow(x)와 length(grouping)ê°€ 서로 ë‹¤ë¦…ë‹ˆë‹¤ì‚¬ìš©ì¤‘ì¸ í–‰ë“¤ì´ ë³€ê²½ë˜ì—ˆìŠµë‹ˆë‹¤: ê²°ì¸¡ì¹˜ë“¤ì„ ì‚­ì œí• ê¹Œìš”?í´ëž˜ìФ "lda"ê°€ 아닌 ê°ì²´ìž…니다í´ëž˜ìФ "qda"ê°€ 아닌 ê°ì²´ìž…니다오로지 'REML = FALSE'ë§Œì´ êµ¬í˜„ë˜ì–´ 있습니다오로지 Chi-squared LR í…ŒìŠ¤íŠ¸ë“¤ë§Œì´ êµ¬í˜„ë˜ì–´ 있습니다'lims'ì—는 오로지 유한한 값들만 허용ë©ë‹ˆë‹¤ìµœì í™”ì— ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤íŒ¨í‚¤ì§€ 'nlme'는 필수ì ìž…니다프로파ì¼ë§ì´ ë” ë‚˜ì€ í•´ë¥¼ 찾았으므로 ì´ˆê¸°ì˜ ì í•©ì€ 수렴하지 않았ìŒì„ ì˜ë¯¸í•©ë‹ˆë‹¤response는 반드시 ìš”ì¸ì´ì–´ì•¼ 합니다response는 반드시 세개 ì´ìƒì˜ ë ˆë²¨ë“¤ì„ ê°€ì§€ê³  있어야 합니다종ì†ë³€ìˆ˜ëŠ” 반드시 양수ì´ì–´ì•¼ 합니다ì¼ë¶€ ê·¸ë£¹ì€ 'qda'ì— ë„ˆë¬´ 작습니다...ì˜ ì¼ë¶€ê°€ ì¼ì¹˜í•˜ì§€ 않습니다'theta'를 재추정하지 ì•Šì€ ìƒíƒœì—서 만들어진 테스트들입니다theta =theta.ml: iter구현ë˜ì§€ ì•Šì€ ë©”ì†Œë“œìž…ë‹ˆë‹¤: %sì§€ì›ë˜ì§€ 않는 ë¶„í¬ìž…니다'newdata'ë‚´ì˜ ë³€ìˆ˜ëª…ë“¤ì´ 'object'ë‚´ì˜ ë³€ìˆ˜ëª…ë“¤ê³¼ ì¼ì¹˜í•˜ì§€ ì•ŠìŠµë‹ˆë‹¤ë³€ìˆ˜ë“¤ì´ ê³µì„ í˜•ê´€ê³„ì— ìžˆìŠµë‹ˆë‹¤weighted fits는 ì§€ì›ë˜ì§€ ì•ŠìŠµë‹ˆë‹¤ë³€ìˆ˜ë“¤ì˜ ê°œìˆ˜ê°€ 잘못ë˜ì—ˆìŠµë‹ˆë‹¤%d와 %d ê°ì²´ë“¤ 사ì´ì˜ 거리가 0 ë˜ëŠ” ìŒìˆ˜ìž…니다MASS/inst/po/fr/0000755000176000001440000000000011754561330013054 5ustar ripleyusersMASS/inst/po/fr/LC_MESSAGES/0000755000176000001440000000000011772553043014643 5ustar 
ripleyusersMASS/inst/po/fr/LC_MESSAGES/R-MASS.mo0000644000176000001440000004327712315222153016144 0ustar ripleyusersÞ•­„éì f‘ø>1Jp-» é ''%Ou$”/¹ é- .8g0‡)¸âú+1])q$›À8Ó +Ed$‚$§Ì<é&<!V4x-­'Û+;E&¨½<Ù>Up°È Ýê &C2]#´ÏèA =b" 3Ã÷)>/\$Œ"±2Ô+*3>^$;Âþ5J$Ot‘¬4Æû!;%U%{/¡Ñ!è& 1ATr„ ¬¹ Õâ%ú5 0V(‡*°!Û6ý46S(г(Ì<õ2 0O € 0” Å &× þ %!#8!\!*x!9£!Ý!÷!N""`")ƒ"(­"Ö"ê"H#,O#|#˜#µ##Ï#"ó#$$!;$]$7v$(®$4×$ %% #% /%=;%y%’%«%P®%aÿ%:a&œ&´&Ô&3î&¯"'rÒ(E)b)H~)QÇ)&*@*_*/~*3®*-â*(+29+"l+?+7Ï+!,=),:g,¢,!·,Ù,9ñ,+-'@-#h-Œ-=©-"ç- ."(.!K.0m.!ž."À.Dã.(/A/0\/;/1É/'û/#0=0 W0Rx0+Ë0÷0!1:31An1&°1,×1&2+2C2^2!s2•2 ©2µ2$Õ2=ú2)83$b3!‡3.©3Ø3Gö3B>4442¶4é4-5'/55W5950Ç5Aø5+:63f6Iš66ä6=7Y7Dn7 ³7;¿7#û78!:8A\8%ž8(Ä8#í8/96A9Px9É9$á9,:3:E: Z:{::«:¼:0Î: ÿ:" ;./;B^;<¡;6Þ;6<'L<6t<*«<IÖ<E =f=1ƒ=Cµ=ù=;>N>7b>(š><Ã>?1?/O?#?+£?IÏ?$@&>@|e@'â@, A57AmA…A\¥A6B!9B[B{B(›B&ÄBLëB#8C(\C<…C&ÂC1éCD#D 3D ADLODœD¸D ÓDmàD‚NEKÑEF.=FlF4ŠF9‘¨xhªO;lƒT„ac_H0 (R’ž)«N‚%{#"s UfoЉ€”¬p¥W­ŸMYˆ!¤2yn|r‹kZ†6©X4=›-—<u>3q¦^]*™Œ@D}b `LIŽœ,Q~‡KS“•v1t GdeA/§7mšJ –…j¢'Cz£EwF˜5.Pig :?\8&B$[¡ +V"%s" link not available for negative binomial family; available links are "identity", "log" and "sqrt""gradient" attribute missing"hessian" attribute missing%d missing observation deleted%d missing observations deleted%d row with zero weights not counted%d rows with zero weights not counted%s does not have both 'qr' and 'y' components'Sigma' is not positive definite'W' is not positive definite'X' must be a numeric or complex matrix'anova' is not available for PQL fits'breaks' do not cover the data'breaks' must be strictly increasing'c' must be at least 1.548 and has been ignored'center' is not the right length'coef' must define a contrast, i.e., sum to 0'coef' must have same length as 'contrast.obj''data' has no 'terms' attribute'densfun' must be supplied as a function or name'dropterm' not implemented for "mlm" fits'family' not recognized'h' must be strictly positive'init' method is unknown'lqs' failed: all the samples were singular'method' is unknown'nbins' must result in a 
positive integer'newdata' is not of the right length'nu' must exceed 2'object' has no 'call' component. Updating not possible'prior' is of incorrect length'ps' must be at least 'p''quantile' must be at least %d'quantile' must be at most %d'rlm' failed to converge in %d steps'start' is not of the correct length'start' must be a named list'start' specifies names which are not arguments to 'densfun''theta' must be given'theta' must be specified'uin' is too large to fit plot in'use.start' cannot be used with R's version of 'glm''x' and 'y' must have the same number of rows'x' contains missing or infinite values'x' has length zero'x' is not a matrix'x' is probably collinear'x' is singular: singular fits are not implemented in 'rlm''x' must be a non-empty numeric vector'y' must be a matrix0 df terms are changing AICAIC is -infinity for this model, so 'stepAIC' cannot proceedAIC is not defined for this model, so 'stepAIC' cannot proceedAIC undefined for REML fitExponential values must be >= 0F test assumes 'quasi%s' familyInfs not allowed in 'd'Initial estimate: %sInitial fit:Initial value for 'theta': %fIter. 
%d Alpha: %sParameter:Probable convergence failureRe-fitting to get HessianResponse variable must be positive after additionsWaiting for profiling to be done...Weibull values must be > 0all frequencies are zeroall variables must be factorsalternation limit reachedan initial configuration must be supplied if there are NAs in 'd'an initial configuration must be supplied with NA/Infs in 'd'an intercept is needed and assumedanova is not implemented for a single "polr" objectat least %d cases are neededat least 3 distinct 'x' values are neededat least one column has IQR 0attempt to find suitable starting values failedbandwidths must be strictly positivebiplot is only possible if nf >= 2cannot estimate scale: MAD is zero for this samplecannot have leave-one-out CV with 'newdata'cannot use leave-one-out CV with method %scolumns of 'contrast.obj' must define a contrast (sum to zero)data vectors must be the same lengthdesign appears to be rank-deficient, so dropping some coefsdim(W) is not correctdistances must be result of 'dist' or a square matrixdowneach element of '%s' must be logicalempty row or column in tableestimate truncated at zeroextra arguments discardedfactors in 'newdata' do not match those for 'object'formula specifies no responsefrequency table is %d-dimensionalgamma values must be >= 0group %s is emptygroups %s are emptygroup means are numerically identicalhigher-way table requested. 
Only 2-way allowedincompatible argumentsinfinite, NA or NaN values in 'x'initial configuration must be completeinvalid 'prior'invalid 'test.vec'invalid initial configurationinvalid length(d)invalid number of levelsinvalid row(x)invalid sizeinvalid table specificationiteration %diteration limit reachediteration limit reached near 'x = %f'length of 'weights' must equal number of observationslength of 'wt' must equal number of observationsminimum occurred at one end of the rangemissing or infinite values are not allowedmissing or infinite values in 'x'missing or infinite values in the data are not allowedmissing values are not allowedmodels were not all fitted to the same size of datasetneed positive values to fit a log-Normalnegative 'weights' valuenegative or non-integer entries in tablenegative values not allowed for the negative binomial familynegative weights not allowedno 'addterm' method implemented for "mlm" modelsno positive weightsno solution in the specified range of bandwidthsno terms in scopeno terms in scope for adding to objectnot a "polr" objectnot all objects are of class "negbin"not all objects are of class "polr"not enough non-missing datanrow(x) and length(grouping) are differentnumber of rows in use has changed: remove missing values?object not of class "lda"object not of class "qda"only %d set, so all sets will be triedonly %d sets, so all sets will be triedonly 'REML = FALSE' is implementedonly Chi-squared LR tests are implementedonly finite values are allowed in 'lims'optimization failedpackage 'nlme' is essentialprofiling has found a better solution, so original fit had not convergedrank = 0: variables are numerically constantrank deficiency in group %sregression apparently linearresponse must be a factorresponse must have 3 or more levelsresponse variable must be positivescope is not a subset of term labelssome group is too small for 'qda'some of ... 
do not matchsupplying pars for the %s distribution is not supportedtests made without re-estimating 'theta'the contrast defined is empty (has no TRUE elements)theta =theta.ml: itertrying + %strying - %suneven breaks with 'prob = FALSE' will give a misleading plotunimplemented method: %sunsupported distributionupusing the %d/%d row from a combined fitusing the %d/%d rows from a combined fitvariable %s appears to be constant within groupsvariables %s appear to be constant within groupsvariable names in 'newdata' do not match those in 'object'variables are collinearweighted fits are not supportedwrong number of variableszero or negative distance between objects %d and %dProject-Id-Version: MASS 7.2-20 Report-Msgid-Bugs-To: bugs@r-project.org POT-Creation-Date: 2013-03-18 09:49 PO-Revision-Date: 2014-03-18 11:08+0100 Last-Translator: Philippe Grosjean Language-Team: French Language: fr MIME-Version: 1.0 Content-Type: text/plain; charset=ISO-8859-1 Content-Transfer-Encoding: 8bit Plural-Forms: nplurals=2; plural=(n > 1); X-Generator: Poedit 1.6.4 lien "%s" non disponible pour la famille binomiale négative ; les liens possibles sont "identity", "log" et "sqrt"attribut "gradient" manquantattribut "hessian" manquant%d observation manquante supprimée%d observations manquantes supprimées%d ligne de poids nul non comptabilisée%d lignes de poids nul non comptabilisées%s n'a pas les composantes 'qr' et 'y''Sigma' n'est définie positive'W' n'est pas définie positive'X' doit être une matrice numérique ou complexe'anova' n'est pas disponible pour un ajustement PQL'breaks' ne recouvrent pas toutes les données'breaks' doit être strictement croissant'c' doit valoir au moins 1.548 et il a été ignoré 'center' n'a pas la bonne longueur'coef' doit définir un contraste, sa somme doit donc être nulle'coef' doit être de la même longueur que 'contrast.obj''data' n'a pas d'attribut 'terms''densfun' doit être fourni comme une fonction ou comme un nom'dropterm' n'est pas implémenté pour les 
ajustements "mlm"'family' non reconnu'h' doit être strictement positifméthode 'init' inconnue'lqs' a échoué : tous les échantillons étaient singuliers'method' est inconnu'nbins' doit renvoyer un entier positif'newdata' n'a pas la bonne longueur'nu' doit être supérieur à 2'object' n'a pas de composante 'call'. Mise à jour impossible'prior' est de longueur incorrecte'ps' doit valoir au moins 'p''quantile' doit valoir au moins %d'quantile' doit valoir au plus %dla convergence de 'rlm' a échoué après %d étapes'start' n'a pas la bonne longueur'start' doit être une liste nommée'start' spécifie des noms qui ne sont pas des arguments de 'densfun''theta' doit être fourni'theta' doit être spécifié'uin' est trop grand pour y ajuster le graphique'use.start' n'est pas utilisable dans la version R de 'glm''x' et 'y' doivent avoir le même nombre de lignesvaleurs manquantes ou infinies dans 'x''x' est de longueur nulle'x' n'est pas une matrice'x' est probablement collinéaire'x' est singulière : les ajustements singuliers ne sont pas implémentés dans 'rlm''x' doit être un vecteur numérique non vide'y' doit être une matriceles termes à 0 ddl changent l'AICAIC vaut -Inf pour ce modèle, 'stepAIC' ne peut poursuivreAIC n'est pas défini pour ce modèle, 'stepAIC' ne peut poursuivreAIC non défini pour un ajustement REMLLes valeurs exponentielles doivent être >= 0le test F suppose la famille 'quasi%s'Infs interdits dans 'd'Estimations initiales : %sAjustement initial :Valeur initiale pour 'theta' : %fItér. 
%d Alpha : %sParamètre :Absence probable de convergenceRéajustement pour obtenir le HessienLa variable de réponse doit être positive après les additionsAttente de la réalisation du profilage...Les valeurs Weibull doivent être > 0toutes les fréquences sont nullestoutes les variables doivent être des facteurslimite d'alternation atteinteune configuration initiale doit être fournie si il y a des NAs dans 'd'une configuration initiale doit être fournie avec NA/Infs dans 'd'une coordonnée à l'origine est nécessaire et assuméel'anova n'est pas implémentée pour un objet "polr"il faut au moins %d casil faut au moins 3 valeurs différentes de 'x'au moins une des colonnes a un EIQ de 0la recherche de valeurs de départ correctes a échouéeles largeurs de bandes doivent être strictement positivesle biplot est possible seulement lorsque nf >= 2impossible d'estimer l'échelle : MAD est nul pour cet échantillonVC par eustachage impossible avec 'newdata'impossible d'utiliser la VC par eustachage pouri %sles colonnes de 'contrast.obj' doivent définir un contraste (somme nulle)les vecteurs de données doivent avoir la même longueurle plan ne semble pas de rang plein, des coefs seront ignorésdim(W) est incorrectles distances doivent provenir de 'dist', ou être une matrice carréevers le bastous les éléments de '%s' doivent être des valeurs logiquesligne ou colonne vide dans la tableestimation tronquée à zéroarguments supplémentaires ignorésles facteurs de 'newdata' ne correspondent pas à ceux de 'object'la formule ne spécifie aucune réponsele tableau de fréquences a %d dimensionsLes valeurs gamma doivent être >= 0le groupe %s est videles groupes %s sont videsles moyennes par groupes sont numériquement identiquesnombre de dimensions de la table insuffisant. 
Seuls deux dimensions sont admisesarguments incompatiblesvaleurs infinies, NA ou NaN dans 'x'la configuration initiale doit être complète'prior' incorrect'test.vec' incorrectconfiguration initale incorrectelength(d) incorrectnombre de niveaux incorrectrow(x) incorrecttaille incorrectespécification de table de contingence incorrecteitération %dnombre limite d'iterations atteintnombre limite d'iterations atteint vers x = %fla longueur de 'weights' doit être égale au nombre d'observations la longueur de 'wt' doit être égale au nombre d'observationsle minimum est atteint à une extrémité de l'intervalleles valeurs manquantes ou infinies ne sont pas admisesvaleurs manquantes ou infinies dans 'x'les valeurs manquantes ou infinies ne sont pas admisesles valeurs manquantes ne sont pas admisesles modèles n'ont pas été ajustés à des jeux de données de même dimensiondes valeurs positives sont requises pour ajuster un modèle log-Normalvaleur de 'weights' négativevaleurs négatives ou non entières dans le tableauvaleurs négatives non autorisées pour la famille binomiale négativepoids négatifs non admispas de méthode 'addterm' implémentée pour les modèles" mlm"aucun poids positifpas de solution dans l'intervalle de largeur de classesaucun terme dans la portée de la formuleaucun terme dans la portée de la formule à ajouter à l'objetce n'est pas un objet "polr"tous les objets ne sont pas de la classe "negbin"tous les objets ne sont pas de la classe "polr"pas assez de données non manquantesnrow(x) et length(grouping) sont différentsle nombre de lignes utilisées a changé : retirer les valeurs manquantes ?l'objet n'est pas de la classe "lda"cet objet n'est pas de la calsse "qda"seulement %d ensemble, donc tous les ensembles seront essayésseulement %d ensembles, donc tous les ensembles seront essayésseulement 'REML = FALSE' est implémentéseuls les tests Chi-deux LR sont implémentésseules des valeurs finies sont autorisées pour 'lims'l'optimisation a échouéle package 'nlme' est 
essentielle profilage a donné une meilleure solution, l'ajustement original n'avait donc pas convergérank = 0 : les variables sont numériquement constantesgroupe %s n'est pas de rang pleinregression apparemment linéairela réponse doit être un facteurla réponse doit avoir au moins 3 niveauxla variable réponse doit être positivela portée de la formule n'est pas un sous-ensemble des étiquettes des termesun groupe est trop petit pour 'qda'des éléments de ... ne correspondent pasfixer des paramètres pour distribution %s n'est pas supportétests réalisés sans ré-estimer 'theta'le contraste défini est vide (aucun élément TRUE)theta =theta.lm : itéressai de + %sessai de - %sdes limites irrégulières avec 'prob = FALSE' donneront un graphique trompeurméthode non implémentée: %sdistribution non supportéevers le haututilisation de %d/%d ligne pour un ajustement combinéutilisation des %d/%d lignes pour un ajustement combinéla variable %s semble être constante à l'intérieur des groupesles variables %s semblent être constantes à l'intérieur des groupesles noms des variables de 'newdata' ne correspondent pas à ceux de 'object'les variables sont collinéairesles ajustements pondérés ne sont pas supportésnombre de variables incorrectdistance négative ou nulle entre les objets %d et %dMASS/inst/po/de/0000755000176000001440000000000011754561330013035 5ustar ripleyusersMASS/inst/po/de/LC_MESSAGES/0000755000176000001440000000000012023424654014617 5ustar ripleyusersMASS/inst/po/de/LC_MESSAGES/R-MASS.mo0000644000176000001440000004326412315222153016121 0ustar ripleyusersÞ•­„éì f‘ø>1Jp-» é ''%Ou$”/¹ é- .8g0‡)¸âú+1])q$›À8Ó +Ed$‚$§Ì<é&<!V4x-­'Û+;E&¨½<Ù>Up°È Ýê &C2]#´ÏèA =b" 3Ã÷)>/\$Œ"±2Ô+*3>^$;Âþ5J$Ot‘¬4Æû!;%U%{/¡Ñ!è& 1ATr„ ¬¹ Õâ%ú5 0V(‡*°!Û6ý46S(г(Ì<õ2 0O € 0” Å &× þ %!#8!\!*x!9£!Ý!÷!N""`")ƒ"(­"Ö"ê"H#,O#|#˜#µ##Ï#"ó#$$!;$]$7v$(®$4×$ %% #% /%=;%y%’%«%P®%aÿ%:a&œ&´&Ô&3î&Ÿ"'yÂ(<)V)Eo)Rµ)2*#;*_*2*1²*!ä*$+2++&^+7…+7½+ õ+;,5R,ˆ,Ÿ,¼,6Ù,-8'-'`-ˆ-C£-ç-."$."G.'j.%’.%¸.>Þ./9/=W/A•/7×/+0;0O0 
d0K…02Ñ011H=1S†1#Ú1!þ1* 2K2e22#“2·2 Ê2/Õ2)37/3(g33¯3$É3"î3H4FZ41¡4GÓ4%5/A5 q59’5'Ì5%ô5E6D`6I¥6Kï6.;7\j7Ç7Kà7,8)/8"Y8'|8 ¤8;Å8!9"#9F9'c9+‹9=·9õ9$ :+1:]:q:†:¦:»:Õ:ç:ú: ;!;);;Be;=¨;.æ;1<%G<>m<!¬<TÎ<Z#=~=4—=>Ì= >8+>d>4}>$²>A×>?2,?-_? ?1®?Ià?!*@!L@`n@$Ï@?ô@)4A^AyAe—A+ýA)BCB!aB*ƒB'®BAÖB)CBCD^C4£C:ØCDD *D 8D\FD £DÄDãDxçDw`E?ØEF01FbF9zF9‘¨xhªO;lƒT„ac_H0 (R’ž)«N‚%{#"s UfoЉ€”¬p¥W­ŸMYˆ!¤2yn|r‹kZ†6©X4=›-—<u>3q¦^]*™Œ@D}b `LIŽœ,Q~‡KS“•v1t GdeA/§7mšJ –…j¢'Cz£EwF˜5.Pig :?\8&B$[¡ +V"%s" link not available for negative binomial family; available links are "identity", "log" and "sqrt""gradient" attribute missing"hessian" attribute missing%d missing observation deleted%d missing observations deleted%d row with zero weights not counted%d rows with zero weights not counted%s does not have both 'qr' and 'y' components'Sigma' is not positive definite'W' is not positive definite'X' must be a numeric or complex matrix'anova' is not available for PQL fits'breaks' do not cover the data'breaks' must be strictly increasing'c' must be at least 1.548 and has been ignored'center' is not the right length'coef' must define a contrast, i.e., sum to 0'coef' must have same length as 'contrast.obj''data' has no 'terms' attribute'densfun' must be supplied as a function or name'dropterm' not implemented for "mlm" fits'family' not recognized'h' must be strictly positive'init' method is unknown'lqs' failed: all the samples were singular'method' is unknown'nbins' must result in a positive integer'newdata' is not of the right length'nu' must exceed 2'object' has no 'call' component. 
Updating not possible'prior' is of incorrect length'ps' must be at least 'p''quantile' must be at least %d'quantile' must be at most %d'rlm' failed to converge in %d steps'start' is not of the correct length'start' must be a named list'start' specifies names which are not arguments to 'densfun''theta' must be given'theta' must be specified'uin' is too large to fit plot in'use.start' cannot be used with R's version of 'glm''x' and 'y' must have the same number of rows'x' contains missing or infinite values'x' has length zero'x' is not a matrix'x' is probably collinear'x' is singular: singular fits are not implemented in 'rlm''x' must be a non-empty numeric vector'y' must be a matrix0 df terms are changing AICAIC is -infinity for this model, so 'stepAIC' cannot proceedAIC is not defined for this model, so 'stepAIC' cannot proceedAIC undefined for REML fitExponential values must be >= 0F test assumes 'quasi%s' familyInfs not allowed in 'd'Initial estimate: %sInitial fit:Initial value for 'theta': %fIter. 
%d Alpha: %sParameter:Probable convergence failureRe-fitting to get HessianResponse variable must be positive after additionsWaiting for profiling to be done...Weibull values must be > 0all frequencies are zeroall variables must be factorsalternation limit reachedan initial configuration must be supplied if there are NAs in 'd'an initial configuration must be supplied with NA/Infs in 'd'an intercept is needed and assumedanova is not implemented for a single "polr" objectat least %d cases are neededat least 3 distinct 'x' values are neededat least one column has IQR 0attempt to find suitable starting values failedbandwidths must be strictly positivebiplot is only possible if nf >= 2cannot estimate scale: MAD is zero for this samplecannot have leave-one-out CV with 'newdata'cannot use leave-one-out CV with method %scolumns of 'contrast.obj' must define a contrast (sum to zero)data vectors must be the same lengthdesign appears to be rank-deficient, so dropping some coefsdim(W) is not correctdistances must be result of 'dist' or a square matrixdowneach element of '%s' must be logicalempty row or column in tableestimate truncated at zeroextra arguments discardedfactors in 'newdata' do not match those for 'object'formula specifies no responsefrequency table is %d-dimensionalgamma values must be >= 0group %s is emptygroups %s are emptygroup means are numerically identicalhigher-way table requested. 
Only 2-way allowedincompatible argumentsinfinite, NA or NaN values in 'x'initial configuration must be completeinvalid 'prior'invalid 'test.vec'invalid initial configurationinvalid length(d)invalid number of levelsinvalid row(x)invalid sizeinvalid table specificationiteration %diteration limit reachediteration limit reached near 'x = %f'length of 'weights' must equal number of observationslength of 'wt' must equal number of observationsminimum occurred at one end of the rangemissing or infinite values are not allowedmissing or infinite values in 'x'missing or infinite values in the data are not allowedmissing values are not allowedmodels were not all fitted to the same size of datasetneed positive values to fit a log-Normalnegative 'weights' valuenegative or non-integer entries in tablenegative values not allowed for the negative binomial familynegative weights not allowedno 'addterm' method implemented for "mlm" modelsno positive weightsno solution in the specified range of bandwidthsno terms in scopeno terms in scope for adding to objectnot a "polr" objectnot all objects are of class "negbin"not all objects are of class "polr"not enough non-missing datanrow(x) and length(grouping) are differentnumber of rows in use has changed: remove missing values?object not of class "lda"object not of class "qda"only %d set, so all sets will be triedonly %d sets, so all sets will be triedonly 'REML = FALSE' is implementedonly Chi-squared LR tests are implementedonly finite values are allowed in 'lims'optimization failedpackage 'nlme' is essentialprofiling has found a better solution, so original fit had not convergedrank = 0: variables are numerically constantrank deficiency in group %sregression apparently linearresponse must be a factorresponse must have 3 or more levelsresponse variable must be positivescope is not a subset of term labelssome group is too small for 'qda'some of ... 
do not matchsupplying pars for the %s distribution is not supportedtests made without re-estimating 'theta'the contrast defined is empty (has no TRUE elements)theta =theta.ml: itertrying + %strying - %suneven breaks with 'prob = FALSE' will give a misleading plotunimplemented method: %sunsupported distributionupusing the %d/%d row from a combined fitusing the %d/%d rows from a combined fitvariable %s appears to be constant within groupsvariables %s appear to be constant within groupsvariable names in 'newdata' do not match those in 'object'variables are collinearweighted fits are not supportedwrong number of variableszero or negative distance between objects %d and %dProject-Id-Version: R 3.1.0 / MASS 7.3-30 Report-Msgid-Bugs-To: bugs@r-project.org POT-Creation-Date: 2013-03-18 09:49 PO-Revision-Date: 2014-03-16 16:32+0100 Last-Translator: Detlef Steuer Language-Team: German Language: de MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Plural-Forms: nplurals=2; plural=(n != 1); Link "%s" nicht für negative binomische Familie verfügbar. Verfügbare Linkfunktionen sind "identity", "log" und "sqrt""gradient"-Attribut fehlt"hessian"-Attribut fehlt%d fehlende Beobachtung gelöscht%d fehlende Beobachtungen gelöscht%d Zeile mit Gewicht Null nicht gezählt%d Zeilen mit Gewicht Null nicht gezählt%s hat nicht sowohl 'qr'- als auch 'y'-Komponenten'Sigma' ist nicht positiv definiert'W' ist nicht positiv definiert'X' muss eine numerische oder komplexe Matrix sein'anova' ist nicht für PQL-Näherungen verfügbar'breaks' deckt die Daten nicht ab'breaks' muss strikt ansteigend sein'c' muss mindestens 1.548 sein und wurde ignoriert'center' ist nicht die richtige Länge'coef' muss einen Kontrast definieren, d.h. 
Summe auf 0'coef' muss die gleiche Länge wie 'contrast.obj' haben'data' hat kein 'terms'-Attribut'densfun' muss als eine Funktion oder Name angegeben werden'dropterm' nicht für "mlm"-Anpassungen implementiert'family' nicht erkannt'h' muss strikt positiv sein'init'-Methode ist unbekannt'lqs' fehlgeschlagen: Alle Stichproben waren singulär'method' ist unbekannt'nbins' muss als Ergebnis eine positive Ganzzahl liefern'newdata' hat nicht die richtige Länge'nu' muss 2 überschreiten'object' hat keine 'call'-Komponente. Aktualisierung nicht möglich'prior' hat fehlerhafte Länge'ps' muss mindestens 'p' sein'quantile' muss mindestens %d sein'quantile' darf höchstens %d sein'rlm' nicht in %d Schritten konvergiert'start' hat nicht die richtige Länge'start' muss eine benannte Liste sein'start' gibt Namen an, die keine Argumente für 'densfun' sind'theta' muss angegeben sein'theta' muss angegeben werden'uin' ist zu groß, um in die grafische Darstellung zu passen'use.start' kann nicht mit der R-Version von 'glm' benutzt werden'x' und 'y' müssen die gleiche Anzahl von Zeilen haben'x' enthält fehlende oder unendliche Werte'x' hat Länge Null'x' ist keine Matrix'x' ist wahrscheinlich kollinear'x' ist singulär: singuläre Anpassungen sind in 'rlm' nicht implementiert'x' muss ein nicht-leerer, numerischer Vektor sein'y' muss eine Matrix sein0 df-Bedinungen verändern AICAIC ist -Inf für dieses Modell, deshalb kann 'stepAIC' nicht fortfahrenAIC ist für dieses Modell nicht definiert, deshalb kann 'stepAIC' nicht fortfahrenAIC für REML-Näherung undefiniertExponential Wert müssen > 0 seinF-Test unterstellt eine 'quasi-%s'-FamilieInfs in 'd' nicht erlaubtAnfängliche Schätzung: %sAnfangsanpassung:Anfänglicher Wert für 'theta': %fIter. 
%d Alpha: %sParameter:Wahrscheinlich konvergierte das Verfahren nichtNeuanpassung um Hesse-Matrix zu bestimmenRückmeldungsvariable muss nach Additionen positiv seinEs wird auf das Profilieren gewartet ...Weibull Werte müssen > 0 seinalle Frequenzen sind Nullalle Variablen müssen Faktoren seinGrenze der Alternierungen erreichtes muss eine Anfangskonfiguration geliefert werden, wenn NAs in 'd' sindeine anfängliche Einrichtung muss mit NA/Infs in 'd' geliefert werdenein Schnittpunkt wird benötigt und vorausgesetztVarianzanalyse ist nicht für ein einzelnes "polr"-Objekt implementiertmindestens %d Fälle werden benötigtmindestens 3 verschiedene 'x'-Werte sind nötigmindestens eine Spalte hat IQR 0Versuch, geeignete Anfangswerte zu finden, fehlgeschlagenBandbreiten müssen strikt positiv seinBiplot ist nur möglich, wenn nf >= 2Skala kann nicht geschätzt werden: MAD ist in dieser Stichprobe Nullkann mit 'newdata' keine leave-one-out Kreuzvalidierung durchführenleave-one-out-Kreuzvalidierung kann nicht bei Methode %s angewandt werdenSpalten von 'contrast.obj' müssen einen Kontrast definieren (Summen auf 0)Datenvektoren müssen die gleiche Länge habenEntwurf scheint Rang-defizitär zu sein, deshalb werden einige Koeffizienten fallen gelassendim(W) ist nicht richtigDistanzen müssen das Ergebnis einer 'dist'- oder quadratischen Matrix seinabjedes Element von '%s' muss boolesch seinLeere Zeile oder Spalte in TabelleSchätzung wurde auf Null abgeschnittenzusätzliche Argumente verworfenFaktoren in 'newdata' entsprechen nicht denen für 'Object'Formel gibt keine Rückmeldung anFrequenztabelle ist %d-dimensionalgamma Werte müssen > 0 seinGruppe %s ist leerGruppen %s sind leerGruppenmittelwerte sind numerisch identischMehr als zwei-wege Tabelle angefordert, nur zwei-wege erlaubtinkompatible Argumenteunendlich, NA- oder NaN-Werte in 'x'Anfangskonfiguration muss vollständig seinungültiger 'prior''test.vec' ungültigungültige Anfangskonfigurationungültige length(d)falsche Anzahl der 
Stufenungültige row(x)ungültige Größeungültige TabellenangabeIteration %dIterationsgrenze erreichtIterationsgrenzwert erreicht bei 'x = %f'Länge von 'weights' muss der Anzahl der Beobachtungen entsprechenLänge von 'wt' muss der Anzahl der Beobachtungen entsprechenMinimum an einem Ende des Bereichs aufgetretenfehlende oder unendliche Werte sind nicht erlaubtfehlende oder unendliche Werte in 'x'fehlende oder unendliche Werte in den Daten sind nicht erlaubtfehlende Werte sind nicht erlaubtnicht alle Modelle wurden an die gleiche Größe, wie die des Datensatzes, angepasstes werden positive Werte benötigt, um an eine logarithmische Normalverteilung anzunähernnegativer 'weights'-Wertnegative oder nicht ganzzahlige Einträge in Tabellekeine negativen Werte für negative binomische Familie erlaubtnegative Gewichte nicht erlaubtkeine 'addterm'-Methode für "mlm"-Modelle implementiertkeine positiven Gewichtekeine Lösung im angegebenen Bereich der Bandbreitenkeine Bedingungen im Geltungsbereichkeine Bedingungen im Geltungsbereich, um ein Objekt hinzuzufügenkein "polr"-Objektnicht alle Objekte gehören der Klasse "negbin" annicht alle Objekte gehören zur Klasse "polr"nicht genug nicht-fehlende Datennrow(x) und length(grouping) sind unterschiedlichAnzahl der benutzten Zeilen hat sich geändert: Fehlende Werte entfernen?Objekt nicht aus der Klasse "lda"Objekt nicht aus der Klasse "qda"nur %d Menge, daher werden alle Mengen getestetnur %d Mengen, daher werden alle Mengen getestetnur 'REML = FALSE' ist implementiertnur LR-Tests der quadratische Chi-Verteilung sind implementiertnur endliche Werte sind in 'lims' erlaubtOptimierung fehlgeschlagenPaket 'nlme' ist erforderlichProfilieren hat eine bessere Lösung gefunden, deshalb konvergierte die urspüngliche Anpassung nichtrank = 0: Variablen sind numerisch konstantRang-Defizit in Gruppe %sRegression anscheinend linearRückmeldung muss ein Faktor seinRückmeldung muss 3 oder mehr Stufen habenRückmeldungsvariable muss positiv 
seinGeltungsbereich ist keine Untermenge von Bedingungsbeschriftungenirgendeine Gruppe ist für 'qda' zu kleinmanches von ... passt nichtAngabe von Parametern wird für die %s-Verteilung nicht unterstütztTests ohne Neuabschätzung von 'theta' durchgeführtder definierte Kontrast ist leer (hat keine TRUE-Elemente)theta =theta.ml: iterversuche + %sversuche - %sungerade Unterbrechungen mit 'prob = FALSE' ergeben eine irreführende grafische Darstellungnicht implementierte Methode: %snicht unterstützte Verteilungauf%d/%d Zeile wird von einer kombinierten Anpassung benutzt.%d/%d Zeilen werden von einer kombinierten Anpassung benutzt.Variable %s scheint innerhalb der Gruppen konstant zu seinVariablen %s scheinen innerhalb der Gruppen konstant zu seinVariablennamen in 'newdata' entsprechen nicht denen in 'Objekt'Variablen sind kollineargewichtete Anpassungen werden nicht unterstütztfalsche VariablenanzahlNull oder negative Entfernung zwischen Objekten %d und %dMASS/inst/CITATION0000644000176000001440000000127411754561330013170 0ustar ripleyuserscitHeader("To cite the MASS package in publications use:") citEntry(entry="Book", title = "Modern Applied Statistics with S", author = personList(as.person("W. N. Venables"), as.person("B. D. Ripley")), publisher = "Springer", edition = "Fourth", address = "New York", year = 2002, note = "ISBN 0-387-95457-0", url = "http://www.stats.ox.ac.uk/pub/MASS4", textVersion = paste("Venables, W. N. & Ripley, B. D. (2002)", "Modern Applied Statistics with S.", "Fourth Edition. Springer, New York. 
ISBN 0-387-95457-0") )MASS/inst/scripts/0000755000176000001440000000000012620323725013513 5ustar ripleyusersMASS/inst/scripts/ch11.R0000644000176000001440000001563112163245454014404 0ustar ripleyusers#-*- R -*- ## Script from Fourth Edition of `Modern Applied Statistics with S' # Chapter 11 Exploratory Multivariate Analysis library(MASS) pdf(file="ch11.pdf", width=8, height=6, pointsize=9) options(width=65, digits=5) # 11.1 Visualization methods # ir <- rbind(iris[,,1], iris[,,2], iris[,,3]) ir <- rbind(iris3[,,1], iris3[,,2], iris3[,,3]) ir.species <- factor(c(rep("s", 50), rep("c", 50), rep("v", 50))) (ir.pca <- princomp(log(ir), cor = TRUE)) summary(ir.pca) plot(ir.pca) ir.pc <- predict(ir.pca) eqscplot(ir.pc[, 1:2], type = "n", xlab = "first principal component", ylab = "second principal component") text(ir.pc[, 1:2], labels = as.character(ir.species), col = 3 + unclass(ir.species)) lcrabs <- log(crabs[, 4:8]) crabs.grp <- factor(c("B", "b", "O", "o")[rep(1:4, each = 50)]) (lcrabs.pca <- princomp(lcrabs)) loadings(lcrabs.pca) lcrabs.pc <- predict(lcrabs.pca) dimnames(lcrabs.pc) <- list(NULL, paste("PC", 1:5, sep = "")) if(FALSE) { # needs interaction with XGobi, or, better, rggobi library(xgobi) xgobi(lcrabs, colors = c("SkyBlue", "SlateBlue", "Orange", "Red")[rep(1:4, each = 50)]) xgobi(lcrabs, glyphs = 12 + 5*rep(0:3, each = 50, 4)) library(rggobi) g <- ggobi(lcrabs) d <- displays(g)[[1]] pmode(d) <- "2D Tour" crabs.grp <- factor(c("B", "b", "O", "o")[rep(1:4, each = 50)]) glyph_colour(g$lcrabs) <- crabs.grp colorscheme(g) <- "Paired 4" } ir.scal <- cmdscale(dist(ir), k = 2, eig = TRUE) ir.scal$points[, 2] <- -ir.scal$points[, 2] eqscplot(ir.scal$points, type = "n") text(ir.scal$points, labels = as.character(ir.species), col = 3 + unclass(ir.species), cex = 0.8) distp <- dist(ir) dist2 <- dist(ir.scal$points) sum((distp - dist2)^2)/sum(distp^2) ir.sam <- sammon(dist(ir[-143,])) eqscplot(ir.sam$points, type = "n") text(ir.sam$points, labels = 
as.character(ir.species[-143]), col = 3 + unclass(ir.species), cex = 0.8) ir.iso <- isoMDS(dist(ir[-143,])) eqscplot(ir.iso$points, type = "n") text(ir.iso$points, labels = as.character(ir.species[-143]), col = 3 + unclass(ir.species), cex = 0.8) cr.scale <- 0.5 * log(crabs$CL * crabs$CW) slcrabs <- lcrabs - cr.scale cr.means <- matrix(0, 2, 5) cr.means[1,] <- colMeans(slcrabs[crabs$sex == "F", ]) cr.means[2,] <- colMeans(slcrabs[crabs$sex == "M", ]) dslcrabs <- slcrabs - cr.means[as.numeric(crabs$sex), ] lcrabs.sam <- sammon(dist(dslcrabs)) eqscplot(lcrabs.sam$points, type = "n", xlab = "", ylab = "") text(lcrabs.sam$points, labels = as.character(crabs.grp)) fgl.iso <- isoMDS(dist(as.matrix(fgl[-40, -10]))) eqscplot(fgl.iso$points, type = "n", xlab = "", ylab = "", axes = FALSE) # either # for(i in seq(along = levels(fgl$type))) { # set <- fgl$type[-40] == levels(fgl$type)[i] # points(fgl.iso$points[set,], pch = 18, cex = 0.6, col = 2 + i)} # key(text = list(levels(fgl$type), col = 3:8)) # or text(fgl.iso$points, labels = c("F", "N", "V", "C", "T", "H")[fgl$type[-40]], cex = 0.6) fgl.iso3 <- isoMDS(dist(as.matrix(fgl[-40, -10])), k = 3) # S: brush(fgl.iso3$points) fgl.col <- c("SkyBlue", "SlateBlue", "Orange", "Orchid", "Green", "HotPink")[fgl$type] # xgobi(fgl.iso3$points, colors = fgl.col) library(class) gr <- somgrid(topo = "hexagonal") crabs.som <- batchSOM(lcrabs, gr, c(4, 4, 2, 2, 1, 1, 1, 0, 0)) plot(crabs.som) bins <- as.numeric(knn1(crabs.som$code, lcrabs, 0:47)) plot(crabs.som$grid, type = "n") symbols(crabs.som$grid$pts[, 1], crabs.som$grid$pts[, 2], circles = rep(0.4, 48), inches = FALSE, add = TRUE) text(crabs.som$grid$pts[bins, ] + rnorm(400, 0, 0.1), as.character(crabs.grp)) crabs.som2 <- SOM(lcrabs, gr); plot(crabs.som2) state <- state.x77[, 2:7]; row.names(state) <- state.abb biplot(princomp(state, cor = TRUE), pc.biplot = TRUE, cex = 0.7, expand = 0.8) library(fastICA) nICA <- 4 crabs.ica <- fastICA(crabs[, 4:8], nICA) Z <- crabs.ica$S par(mfrow 
= c(2, nICA)) for(i in 1:nICA) boxplot(Z[, i] ~ crabs.grp) par(mfrow = c(1, 1)) # S: stars(state.x77[, c(7, 4, 6, 2, 5, 3)], byrow = TRUE) stars(state.x77[, c(7, 4, 6, 2, 5, 3)]) parcoord(state.x77[, c(7, 4, 6, 2, 5, 3)]) parcoord(log(ir)[, c(3, 4, 2, 1)], col = 1 + (0:149)%/%50) # 11.2 Cluster analysis swiss.x <- as.matrix(swiss[,-1]) library(cluster) # S: h <- hclust(dist(swiss.x), method = "connected") h <- hclust(dist(swiss.x), method = "single") plot(h) cutree(h, 3) # S: plclust( clorder(h, cutree(h, 3) )) pltree(diana(swiss.x)) par(mfrow = c(1, 1)) h <- hclust(dist(swiss.x), method = "average") initial <- tapply(swiss.x, list(rep(cutree(h, 3), ncol(swiss.x)), col(swiss.x)), mean) dimnames(initial) <- list(NULL, dimnames(swiss.x)[[2]]) km <- kmeans(swiss.x, initial) (swiss.pca <- princomp(swiss.x)) swiss.px <- predict(swiss.pca) dimnames(km$centers)[[2]] <- dimnames(swiss.x)[[2]] swiss.centers <- predict(swiss.pca, km$centers) eqscplot(swiss.px[, 1:2], type = "n", xlab = "first principal component", ylab = "second principal component") text(swiss.px[, 1:2], labels = km$cluster) points(swiss.centers[,1:2], pch = 3, cex = 3) if(interactive()) identify(swiss.px[, 1:2], cex = 0.5) swiss.pam <- pam(swiss.px, 3) summary(swiss.pam) eqscplot(swiss.px[, 1:2], type = "n", xlab = "first principal component", ylab = "second principal component") text(swiss.px[,1:2], labels = swiss.pam$clustering) points(swiss.pam$medoid[,1:2], pch = 3, cex = 3) fanny(swiss.px, 3) ## From the on-line Errata: ## ## `The authors of mclust have chosen to re-use the name for a ## completely incompatible package. We can no longer recommend its ## use, and the code given in the first printing does not work in R's ## mclust-2.x.' ## ## And later mclust was given a restrictive licence, so this example ## has been removed. Finally in 2012 it was given an OpenSource licence. 
# 11.3 Factor analysis ability.FA <- factanal(covmat = ability.cov, factors = 1) ability.FA (ability.FA <- update(ability.FA, factors = 2)) #summary(ability.FA) round(loadings(ability.FA) %*% t(loadings(ability.FA)) + diag(ability.FA$uniq), 3) if(require("GPArotation")) { # loadings(rotate(ability.FA, rotation = "oblimin")) L <- loadings(ability.FA) print(oblirot <- oblimin(L)) par(pty = "s") eqscplot(L, xlim = c(0,1), ylim = c(0,1)) if(interactive()) identify(L, dimnames(L)[[1]]) naxes <- oblirot$Th arrows(rep(0, 2), rep(0, 2), naxes[,1], naxes[,2]) } # 11.4 Discrete multivariate analysis caith <- as.matrix(caith) names(dimnames(caith)) <- c("eyes", "hair") mosaicplot(caith, color = TRUE) House <- xtabs(Freq ~ Type + Infl + Cont + Sat, housing) mosaicplot(House, color = TRUE) corresp(caith) caith2 <- caith dimnames(caith2)[[2]] <- c("F", "R", "M", "D", "B") par(mfcol = c(1, 3)) plot(corresp(caith2, nf = 2)); title("symmetric") plot(corresp(caith2, nf = 2), type = "rows"); title("rows") plot(corresp(caith2, nf = 2), type = "col"); title("columns") par(mfrow = c(1, 1)) farms.mca <- mca(farms, abbrev = TRUE) # Use levels as names plot(farms.mca, cex = rep(0.7, 2)) # End of ch11 MASS/inst/scripts/ch10.R0000644000176000001440000002136612453175725014412 0ustar ripleyusers#-*- R -*- ## Script from Fourth Edition of `Modern Applied Statistics with S' # Chapter 10 Random and Mixed Effects library(MASS) library(lattice) pdf(file="ch10.pdf", width=8, height=6, pointsize=9) options(width=65, digits=5) library(nlme) # 10.1 Linear models xyplot(Y ~ EP | No, data = petrol, xlab = "ASTM end point (deg. 
F)", ylab = "Yield as a percent of crude", panel = function(x, y) { panel.grid() m <- sort.list(x) panel.xyplot(x[m], y[m], type = "b", cex = 0.5) }) Petrol <- petrol names(Petrol) Petrol[, 2:5] <- scale(Petrol[, 2:5], scale = FALSE) pet1.lm <- lm(Y ~ No/EP - 1, Petrol) matrix(round(coef(pet1.lm), 2), 2, 10, byrow = TRUE, dimnames = list(c("b0", "b1"), levels(Petrol$No))) pet2.lm <- lm(Y ~ No - 1 + EP, Petrol) anova(pet2.lm, pet1.lm) pet3.lm <- lm(Y ~ SG + VP + V10 + EP, Petrol) anova(pet3.lm, pet2.lm) pet3.lme <- lme(Y ~ SG + VP + V10 + EP, random = ~ 1 | No, data = Petrol) summary(pet3.lme) pet3.lme <- update(pet3.lme, method = "ML") summary(pet3.lme) anova(pet3.lme, pet3.lm) pet4.lme <- update(pet3.lme, fixed = Y ~ V10 + EP) anova(pet4.lme, pet3.lme) fixed.effects(pet4.lme) coef(pet4.lme) pet5.lme <- update(pet4.lme, random = ~ 1 + EP | No) anova(pet4.lme, pet5.lme) nl1 <- nlschools attach(nl1) classMeans <- tapply(IQ, class, mean) nl1$IQave <- classMeans[as.character(class)] detach() cen <- c("IQ", "IQave", "SES") nl1[cen] <- scale(nl1[cen], center = TRUE, scale = FALSE) options(contrasts = c("contr.treatment", "contr.poly")) nl.lme <- lme(lang ~ IQ*COMB + IQave + SES, random = ~ IQ | class, data = nl1) summary(nl.lme) ## singular.ok = TRUE is the default in R summary(lm(lang ~ IQ*COMB + SES + class, data = nl1, singular.ok = TRUE)) nl2 <- cbind(aggregate(nl1[c(1,7)], list(class = nl1$class), mean), unique(nl1[c("class", "COMB", "GS")])) summary(lm(lang ~ IQave + COMB, data = nl2, weights = GS)) sitka.lme <- lme(size ~ treat*ordered(Time), random = ~1 | tree, data = Sitka, method = "ML") Sitka <- Sitka # make a local copy for S-PLUS attach(Sitka) Sitka$treatslope <- Time * (treat == "ozone") detach() sitka.lme2 <- update(sitka.lme, fixed = size ~ ordered(Time) + treat + treatslope) anova(sitka.lme, sitka.lme2) # fitted curves matrix(fitted(sitka.lme2, level = 0)[c(301:305, 1:5)], 2, 5, byrow = TRUE, dimnames = list(c("control", "ozone"), unique(Sitka$Time))) # 
10.2 Classic nested designs if(FALSE) { summary(raov(Conc ~ Lab/Bat, data = coop, subset = Spc=="S1")) is.random(coop) <- T is.random(coop$Spc) <- F is.random(coop) varcomp(Conc ~ Lab/Bat, data = coop, subset = Spc=="S1") varcomp(Conc ~ Lab/Bat, data = coop, subset = Spc=="S1", method = c("winsor", "minque0")) } #oats <- oats # make a local copy: needed in S-PLUS oats$Nf <- ordered(oats$N, levels = sort(levels(oats$N))) oats.aov <- aov(Y ~ Nf*V + Error(B/V), data = oats, qr = TRUE) summary(oats.aov) summary(oats.aov, split = list(Nf = list(L = 1, Dev = 2:3))) plot(fitted(oats.aov[[4]]), studres(oats.aov[[4]])) abline(h = 0, lty = 2) oats.pr <- proj(oats.aov) qqnorm(oats.pr[[4]][,"Residuals"], ylab = "Stratum 4 residuals") qqline(oats.pr[[4]][,"Residuals"]) oats.aov <- aov(Y ~ N + V + Error(B/V), data = oats, qr = TRUE) model.tables(oats.aov, type = "means", se = TRUE) # we can get the unimplemented standard errors from se.contrast(oats.aov, list(N == "0.0cwt", N == "0.2cwt"), data=oats) se.contrast(oats.aov, list(V == "Golden.rain", V == "Victory"), data=oats) # is.random(oats$B) <- T # varcomp(Y ~ N + V + B/V, data = oats) lme(Conc ~ 1, random = ~1 | Lab/Bat, data = coop, subset = Spc=="S1") options(contrasts = c("contr.treatment", "contr.poly")) summary(lme(Y ~ N + V, random = ~1 | B/V, data = oats)) # 10.3 Non-linear mixed effects models options(contrasts = c("contr.treatment", "contr.poly")) sitka.nlme <- nlme(size ~ A + B * (1 - exp(-(Time-100)/C)), fixed = list(A ~ treat, B ~ treat, C ~ 1), random = A + B ~ 1 | tree, data = Sitka, start = list(fixed = c(2, 0, 4, 0, 100)), verbose = TRUE) summary(sitka.nlme) summary(update(sitka.nlme, corr = corCAR1(0.95, ~Time | tree))) Fpl <- deriv(~ A + (B-A)/(1 + exp((log(d) - ld50)/th)), c("A","B","ld50","th"), function(d, A, B, ld50, th) {}) st <- coef(nls(BPchange ~ Fpl(Dose, A, B, ld50, th), start = c(A = 25, B = 0, ld50 = 4, th = 0.25), data = Rabbit)) Rc.nlme <- nlme(BPchange ~ Fpl(Dose, A, B, ld50, th), fixed = 
list(A ~ 1, B ~ 1, ld50 ~ 1, th ~ 1), random = A + ld50 ~ 1 | Animal, data = Rabbit, subset = Treatment == "Control", start = list(fixed = st)) ## The next fails on some R platforms and some versions of nlme ## Rm.nlme <- update(Rc.nlme, subset = Treatment=="MDL") ## so update starting values st <- coef(nls(BPchange ~ Fpl(Dose, A, B, ld50, th), start = c(A = 25, B = 0, ld50 = 4, th = 0.25), data = Rabbit, subset = Treatment == "MDL")) Rm.nlme <- update(Rc.nlme, subset = Treatment=="MDL", start = list(fixed = st)) Rc.nlme Rm.nlme c1 <- c(28, 1.6, 4.1, 0.27, 0) R.nlme1 <- nlme(BPchange ~ Fpl(Dose, A, B, ld50, th), fixed = list(A ~ Treatment, B ~ Treatment, ld50 ~ Treatment, th ~ Treatment), random = A + ld50 ~ 1 | Animal/Run, data = Rabbit, start = list(fixed = c1[c(1, 5, 2, 5, 3, 5, 4, 5)])) summary(R.nlme1) R.nlme2 <- update(R.nlme1, fixed = list(A ~ 1, B ~ 1, ld50 ~ Treatment, th ~ 1), start = list(fixed = c1[c(1:3, 5, 4)])) anova(R.nlme2, R.nlme1) summary(R.nlme2) xyplot(BPchange ~ log(Dose) | Animal * Treatment, Rabbit, xlab = "log(Dose) of Phenylbiguanide", ylab = "Change in blood pressure (mm Hg)", subscripts = TRUE, aspect = "xy", panel = function(x, y, subscripts) { panel.grid() panel.xyplot(x, y) sp <- spline(x, fitted(R.nlme2)[subscripts]) panel.xyplot(sp$x, sp$y, type = "l") }) # 10.4 Generalized linear mixed models contrasts(bacteria$trt) <- structure(contr.sdif(3), dimnames = list(NULL, c("drug", "encourage"))) summary(glm(y ~ trt * week, binomial, data = bacteria)) summary(glm(y ~ trt + week, binomial, data = bacteria)) summary(glm(y ~ lbase*trt + lage + V4, family = poisson, data = epil)) epil2 <- epil[epil$period == 1, ] epil2["period"] <- rep(0, 59); epil2["y"] <- epil2["base"] epil["time"] <- 1; epil2["time"] <- 4 epil2 <- rbind(epil, epil2) epil2$pred <- unclass(epil2$trt) * (epil2$period > 0) epil2$subject <- factor(epil2$subject) epil3 <- aggregate(epil2, list(epil2$subject, epil2$period > 0), function(x) if(is.numeric(x)) sum(x) else x[1]) 
epil3$pred <- factor(epil3$pred, labels = c("base", "placebo", "drug")) contrasts(epil3$pred) <- structure(contr.sdif(3), dimnames = list(NULL, c("placebo-base", "drug-placebo"))) summary(glm(y ~ pred + factor(subject) + offset(log(time)), family = poisson, data = epil3)) glm(y ~ factor(subject), family = poisson, data = epil) library(survival) bacteria$Time <- rep(1, nrow(bacteria)) coxph(Surv(Time, unclass(y)) ~ week + strata(ID), data = bacteria, method = "exact") coxph(Surv(Time, unclass(y)) ~ factor(week) + strata(ID), data = bacteria, method = "exact") coxph(Surv(Time, unclass(y)) ~ I(week > 2) + strata(ID), data = bacteria, method = "exact") fit <- glm(y ~ trt + I(week> 2), binomial, data = bacteria) summary(fit) sum(residuals(fit, type = "pearson")^2) if(FALSE) { # slow ## package available from http://www.stats.ox.ac.uk/pub/RWin ## but these examples fail library(GLMMGibbs) # declare a random intercept for each subject epil$subject <- Ra(data = factor(epil$subject)) glmm(y ~ lbase*trt + lage + V4 + subject, family = poisson, data = epil, keep = 100000, thin = 100) epil3$subject <- Ra(data = factor(epil3$subject)) glmm(y ~ pred + subject, family = poisson, data = epil3, keep = 100000, thin = 100) } summary(glmmPQL(y ~ trt + I(week> 2), random = ~ 1 | ID, family = binomial, data = bacteria)) summary(glmmPQL(y ~ lbase*trt + lage + V4, random = ~ 1 | subject, family = poisson, data = epil)) summary(glmmPQL(y ~ pred, random = ~1 | subject, family = poisson, data = epil3)) # 10.5 GEE models ## modified for YAGS >= 3.21-3 ## package available from http://www.stats.ox.ac.uk/pub/RWin if(require(yags)) { print(yags(y == "y" ~ trt + I(week > 2), family = binomial, alphainit=0, id = ID, corstr = "exchangeable", data = bacteria)) print(yags(y ~ lbase*trt + lage + V4, family = poisson, alphainit=0, id = subject, corstr = "exchangeable", data = epil)) } options(contrasts = c("contr.sum", "contr.poly")) library(gee) summary(gee(y ~ pred + factor(subject), family = 
poisson, id = subject, data = epil3, corstr = "exchangeable")) # End of ch10 MASS/inst/scripts/ch09.R0000644000176000001440000000460711754561330014413 0ustar ripleyusers#-*- R -*- ## Script from Fourth Edition of `Modern Applied Statistics with S' # Chapter 9 Tree-based Methods library(MASS) pdf(file="ch09.pdf", width=8, height=6, pointsize=9) options(digits=5) library(rpart) # Figure 9.3 shuttle.rp <- rpart(use ~ ., data=shuttle, minbucket=0, xval = 0, maxsurrogate = 0, cp = 0, subset = 1:253) post(shuttle.rp, horizontal = FALSE, height=10, width=8, title = "", pointsize = 8, pretty = 0) # 9.3 Implementation in rpart set.seed(123) cpus.rp <- rpart(log10(perf) ~ ., cpus[ , 2:8], cp = 1e-3) cpus.rp print(cpus.rp, cp = 0.01) # default pruning plot(cpus.rp, uniform = TRUE) text(cpus.rp, digits = 3) printcp(cpus.rp) plotcp(cpus.rp) cpus.rp1 <- prune(cpus.rp, cp = 0.006) print(cpus.rp1, digits = 3) plot(cpus.rp1, branch = 0.4, uniform = TRUE) text(cpus.rp1, digits = 3) # for figure 9.2 cpus.rp2 <- prune(cpus.rp, cp = 0.03) post(cpus.rp2, horizontal = FALSE, title = "", digits=4, pointsize=18) set.seed(123) fgl.rp <- rpart(type ~ ., fgl, cp = 0.001) plotcp(fgl.rp) printcp(fgl.rp) fgl.rp2 <- prune(fgl.rp, cp = 0.02) plot(fgl.rp2, uniform = TRUE) text(fgl.rp2, use.n = TRUE) fgl.rp2 summary(fgl.rp2) set.seed(123) fgl.rp3 <- rpart(type ~ ., fgl, cp = 0.001, parms = list(split="information")) plotcp(fgl.rp3) printcp(fgl.rp3) fgl.rp4 <- prune(fgl.rp3, cp = 0.03) plot(fgl.rp4, uniform = TRUE); text(fgl.rp4, use.n = TRUE) plot(cpus.rp, branch = 0.6, compress = TRUE, uniform = TRUE) text(cpus.rp, digits = 3, all = TRUE, use.n = TRUE) # 9.3 Implementation in tree library(tree) ## the stopping criteria differ slightly between R and S-PLUS cpus.ltr <- tree(log10(perf) ~ ., data = cpus[, 2:8], mindev = 0.005) summary(cpus.ltr) cpus.ltr plot(cpus.ltr, type="u"); text(cpus.ltr) par(mfrow = c(1, 2), pty = "s") set.seed(321) plot(cv.tree(cpus.ltr, , prune.tree)) cpus.ltr1 <- 
prune.tree(cpus.ltr, best = 10) plot(cpus.ltr1, type = "u") text(cpus.ltr1, digits = 3) par(mfrow = c(1, 1), pty = "m") fgl.tr <- tree(type ~ ., fgl) summary(fgl.tr) plot(fgl.tr) text(fgl.tr, all = TRUE, cex = 0.5) par(mfrow = c(1, 2), pty = "s") set.seed(123) fgl.cv <- cv.tree(fgl.tr,, prune.misclass) for(i in 2:5) fgl.cv$dev <- fgl.cv$dev + cv.tree(fgl.tr,, prune.misclass)$dev fgl.cv$dev <- fgl.cv$dev/5 fgl.cv plot(fgl.cv) fgl.tr1 <- prune.misclass(fgl.tr, best = 9) plot(fgl.tr1, type = "u") text(fgl.tr1, all = TRUE) # End of ch09 MASS/inst/scripts/ch07.R0000644000176000001440000001341511754561330014406 0ustar ripleyusers#-*- R -*- ## Script from Fourth Edition of `Modern Applied Statistics with S' # Chapter 7 Generalized Linear Models library(MASS) options(width=65, digits=5, height=9999) pdf(file="ch07.pdf", width=8, height=6, pointsize=9) options(contrasts = c("contr.treatment", "contr.poly")) ax.1 <- glm(Postwt ~ Prewt + Treat + offset(Prewt), family = gaussian, data = anorexia) summary(ax.1) # 7.2 Binomial data options(contrasts = c("contr.treatment", "contr.poly")) ldose <- rep(0:5, 2) numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16) sex <- factor(rep(c("M", "F"), c(6, 6))) SF <- cbind(numdead, numalive = 20 - numdead) budworm.lg <- glm(SF ~ sex*ldose, family = binomial) summary(budworm.lg) plot(c(1,32), c(0,1), type = "n", xlab = "dose", ylab = "prob", log = "x") text(2^ldose, numdead/20, labels = as.character(sex)) ld <- seq(0, 5, 0.1) lines(2^ld, predict(budworm.lg, data.frame(ldose = ld, sex = factor(rep("M", length(ld)), levels = levels(sex))), type = "response"), col = 3) lines(2^ld, predict(budworm.lg, data.frame(ldose = ld, sex = factor(rep("F", length(ld)), levels = levels(sex))), type = "response"), lty = 2, col = 2) budworm.lgA <- update(budworm.lg, . ~ sex * I(ldose - 3)) summary(budworm.lgA, cor = F)$coefficients anova(update(budworm.lg, . ~ . 
+ sex * I(ldose^2)), test = "Chisq") budworm.lg0 <- glm(SF ~ sex + ldose - 1, family = binomial) summary(budworm.lg0, cor = F)$coefficients dose.p(budworm.lg0, cf = c(1,3), p = 1:3/4) dose.p(update(budworm.lg0, family = binomial(link = probit)), cf = c(1, 3), p = 1:3/4) options(contrasts = c("contr.treatment", "contr.poly")) attach(birthwt) race <- factor(race, labels = c("white", "black", "other")) table(ptl) ptd <- factor(ptl > 0) table(ftv) ftv <- factor(ftv) levels(ftv)[-(1:2)] <- "2+" table(ftv) # as a check bwt <- data.frame(low = factor(low), age, lwt, race, smoke = (smoke > 0), ptd, ht = (ht > 0), ui = (ui > 0), ftv) detach(); rm(race, ptd, ftv) birthwt.glm <- glm(low ~ ., family = binomial, data = bwt) summary(birthwt.glm) birthwt.step <- stepAIC(birthwt.glm, trace = FALSE) birthwt.step$anova birthwt.step2 <- stepAIC(birthwt.glm, ~ .^2 + I(scale(age)^2) + I(scale(lwt)^2), trace = FALSE) birthwt.step2$anova summary(birthwt.step2)$coef table(bwt$low, predict(birthwt.step2) > 0) ## R has a similar gam() in package gam and a different gam() in package mgcv if(require(gam)) { attach(bwt) age1 <- age*(ftv=="1"); age2 <- age*(ftv=="2+") birthwt.gam <- gam(low ~ s(age) + s(lwt) + smoke + ptd + ht + ui + ftv + s(age1) + s(age2) + smoke:ui, binomial, bwt, bf.maxit=25) print(summary(birthwt.gam)) print(table(low, predict(birthwt.gam) > 0)) par(mfrow = c(2, 2)) if(interactive()) plot(birthwt.gam, ask = TRUE, se = TRUE) par(mfrow = c(1, 1)) detach() } library(mgcv) attach(bwt) age1 <- age*(ftv=="1"); age2 <- age*(ftv=="2+") (birthwt.gam <- gam(low ~ s(age) + s(lwt) + smoke + ptd + ht + ui + ftv + s(age1) + s(age2) + smoke:ui, binomial, bwt)) table(low, predict(birthwt.gam) > 0) par(mfrow = c(2, 2)) plot(birthwt.gam, se = TRUE) par(mfrow = c(1, 1)) detach() # 7.3 Poisson models names(housing) house.glm0 <- glm(Freq ~ Infl*Type*Cont + Sat, family = poisson, data = housing) summary(house.glm0) addterm(house.glm0, ~. 
+ Sat:(Infl+Type+Cont), test = "Chisq") house.glm1 <- update(house.glm0, . ~ . + Sat:(Infl+Type+Cont)) summary(house.glm1) 1 - pchisq(deviance(house.glm1), house.glm1$df.resid) dropterm(house.glm1, test = "Chisq") addterm(house.glm1, ~. + Sat:(Infl+Type+Cont)^2, test = "Chisq") hnames <- lapply(housing[, -5], levels) # omit Freq house.pm <- predict(house.glm1, expand.grid(hnames), type = "response") # poisson means house.pm <- matrix(house.pm, ncol = 3, byrow = TRUE, dimnames = list(NULL, hnames[[1]])) house.pr <- house.pm/drop(house.pm %*% rep(1, 3)) cbind(expand.grid(hnames[-1]), round(house.pr, 2)) loglm(Freq ~ Infl*Type*Cont + Sat*(Infl+Type+Cont), data = housing) library(nnet) (house.mult <- multinom(Sat ~ Infl + Type + Cont, weights = Freq, data = housing)) house.mult2 <- multinom(Sat ~ Infl*Type*Cont, weights = Freq, data = housing) anova(house.mult, house.mult2, test = "none") house.pm <- predict(house.mult, expand.grid(hnames[-1]), type = "probs") cbind(expand.grid(hnames[-1]), round(house.pm, 2)) house.cpr <- apply(house.pr, 1, cumsum) logit <- function(x) log(x/(1-x)) house.ld <- logit(house.cpr[2, ]) - logit(house.cpr[1, ]) sort(drop(house.ld)) mean(.Last.value) house.plr <- polr(Sat ~ Infl + Type + Cont, data = housing, weights = Freq) house.plr house.pr1 <- predict(house.plr, expand.grid(hnames[-1]), type = "probs") cbind(expand.grid(hnames[-1]), round(house.pr1, 2)) Fr <- matrix(housing$Freq, ncol = 3, byrow = TRUE) 2 * sum(Fr * log(house.pr/house.pr1)) house.plr2 <- stepAIC(house.plr, ~.^2) house.plr2$anova # 7.4 A negative binomial family glm(Days ~ .^4, family = poisson, data = quine) quine.nb <- glm(Days ~ .^4, family = negative.binomial(2), data = quine) quine.nb0 <- update(quine.nb, . ~ Sex/(Age + Eth*Lrn)) anova(quine.nb0, quine.nb, test = "Chisq") quine.nb <- glm.nb(Days ~ .^4, data = quine) quine.nb2 <- stepAIC(quine.nb) quine.nb2$anova dropterm(quine.nb2, test = "Chisq") quine.nb3 <- update(quine.nb2, . ~ . 
- Eth:Age:Lrn - Sex:Age:Lrn) anova(quine.nb2, quine.nb3) c(theta = quine.nb2$theta, SE = quine.nb2$SE) par(mfrow = c(2,2), pty = "m") rs <- resid(quine.nb2, type = "deviance") plot(predict(quine.nb2), rs, xlab = "Linear predictors", ylab = "Deviance residuals") abline(h = 0, lty = 2) qqnorm(rs, ylab = "Deviance residuals") qqline(rs) par(mfrow = c(1,1)) # End of ch07 MASS/inst/scripts/ch14.R0000644000176000001440000001571211754561330014406 0ustar ripleyusers#-*- R -*- ## Script from Fourth Edition of `Modern Applied Statistics with S' # Chapter 14 Time Series library(MASS) pdf(file="ch14.pdf", width=8, height=6, pointsize=9) options(width=65, digits=5) lh deaths #tspar(deaths) tsp(deaths) start(deaths) end(deaths) frequency(deaths) cycle(deaths) ts.plot(lh) ts.plot(deaths, mdeaths, fdeaths, lty = c(1, 3, 4), xlab = "year", ylab = "deaths") aggregate(deaths, 4, sum) aggregate(deaths, 1, mean) # 14.1 Second-order summaries acf(lh) acf(lh, type = "covariance") acf(deaths) acf(ts.union(mdeaths, fdeaths)) par(mfrow = c(2, 2)) spectrum(lh) spectrum(deaths) par(mfrow = c(2, 2)) spectrum(lh) spectrum(lh, spans = 3) spectrum(lh, spans = c(3, 3)) spectrum(lh, spans = c(3, 5)) spectrum(deaths) spectrum(deaths, spans = c(3, 3)) spectrum(deaths, spans = c(3, 5)) spectrum(deaths, spans = c(5, 7)) par(mfrow = c(1, 2)) cpgram(lh) cpgram(deaths) par(mfrow = c(1, 1)) # 14.2 ARIMA models # ts.sim <- arima.sim(list(order = c(1,1,0), ar = 0.7), n = 200) acf(lh, type = "partial") acf(deaths, type = "partial") lh.ar1 <- ar(lh, FALSE, 1) cpgram(lh.ar1$resid, main = "AR(1) fit to lh") lh.ar <- ar(lh, order.max = 9) lh.ar$order lh.ar$aic cpgram(lh.ar$resid, main = "AR(3) fit to lh") (lh.arima1 <- arima(lh, order = c(1,0,0))) tsdiag(lh.arima1) (lh.arima3 <- arima(lh, order = c(3,0,0))) tsdiag(lh.arima3) (lh.arima11 <- arima(lh, order = c(1,0,1))) lh.fore <- predict(lh.arima3, 12) ts.plot(lh, lh.fore$pred, lh.fore$pred + 2*lh.fore$se, lh.fore$pred - 2*lh.fore$se, lty = c(1,2,3,3)) # 14.3 
Seasonality deaths.stl <- stl(deaths, "periodic") dsd <- deaths.stl$time.series[, "trend"] + deaths.stl$time.series[, "remainder"] #ts.plot(deaths, deaths.stl$sea, deaths.stl$rem) ts.plot(deaths, deaths.stl$time.series[, "seasonal"], dsd, gpars = list(lty = c(1, 3, 2))) par(mfrow = c(2, 3)) #dsd <- deaths.stl$rem ts.plot(dsd) acf(dsd) acf(dsd, type = "partial") spectrum(dsd, span = c(3, 3)) cpgram(dsd) dsd.ar <- ar(dsd) dsd.ar$order dsd.ar$aic dsd.ar$ar cpgram(dsd.ar$resid, main = "AR(1) residuals") par(mfrow = c(1, 1)) deaths.diff <- diff(deaths, 12) acf(deaths.diff, 30) acf(deaths.diff, 30, type = "partial") ar(deaths.diff) # this suggests the seasonal effect is still present. (deaths.arima1 <- arima(deaths, order = c(2,0,0), seasonal = list(order = c(0,1,0), period = 12)) ) tsdiag(deaths.arima1, gof.lag = 30) # suggests need a seasonal AR term (deaths.arima2 <- arima(deaths, order = c(2,0,0), list(order = c(1,0,0), period = 12)) ) tsdiag(deaths.arima2, gof.lag = 30) cpgram(deaths.arima2$resid) (deaths.arima3 <- arima(deaths, order = c(2,0,0), list(order = c(1,1,0), period = 12)) ) tsdiag(deaths.arima3, gof.lag = 30) par(mfrow = c(3, 1)) nott <- window(nottem, end = c(1936, 12)) ts.plot(nott) nott.stl <- stl(nott, "period") ts.plot(nott.stl$time.series[, c("remainder", "seasonal")], gpars = list(ylim = c(-15, 15), lty = c(1, 3))) nott.stl <- stl(nott, 5) ts.plot(nott.stl$time.series[, c("remainder", "seasonal")], ylim = c(-15, 15), lty = c(1, 3)) par(mfrow = c(1, 1)) boxplot(split(nott, cycle(nott)), names = month.abb) nott[110] <- 35 nott.stl <- stl(nott, "period") nott1 <- nott.stl$time.series[, "trend"] + nott.stl$time.series[, "remainder"] acf(nott1) acf(nott1, type = "partial") cpgram(nott1) ar(nott1)$aic plot(0:23, ar(nott1)$aic, xlab = "order", ylab = "AIC", main = "AIC for AR(p)") (nott1.ar1 <- arima(nott1, order = c(1,0,0))) nott1.fore <- predict(nott1.ar1, 36) nott1.fore$pred <- nott1.fore$pred + as.vector(nott.stl$time.series[1:36, "seasonal"]) 
ts.plot(window(nottem, 1937), nott1.fore$pred, nott1.fore$pred+2*nott1.fore$se, nott1.fore$pred-2*nott1.fore$se, lty = c(3, 1, 2, 2)) title("via Seasonal Decomposition") acf(diff(nott,12), 30) acf(diff(nott,12), 30, type = "partial") cpgram(diff(nott, 12)) (nott.arima1 <- arima(nott, order = c(1,0,0), list(order = c(2,1,0), period = 12)) ) tsdiag(nott.arima1, gof.lag = 30) (nott.arima2 <- arima(nott, order = c(0,0,2), list(order = c(0,1,2), period = 12)) ) tsdiag(nott.arima2, gof.lag = 30) (nott.arima3 <- arima(nott, order = c(1,0,0), list(order = c(0,1,2), period = 12)) ) tsdiag(nott.arima3, gof.lag = 30) nott.fore <- predict(nott.arima3, 36) ts.plot(window(nottem, 1937), nott.fore$pred, nott.fore$pred+2*nott.fore$se, nott.fore$pred-2*nott.fore$se, lty = c(3, 1, 2, 2)) title("via Seasonal ARIMA model") # 14.6 Regression with autocorrelated errors attach(beav1) beav1$hours <- 24*(day-346) + trunc(time/100) + (time%%100)/60 detach() attach(beav2) beav2$hours <- 24*(day-307) + trunc(time/100) + (time%%100)/60 detach() par(mfrow = c(2, 2)) plot(beav1$hours, beav1$temp, type = "l", xlab = "time", ylab = "temperature", main = "Beaver 1") usr <- par("usr"); usr[3:4] <- c(-0.2, 8); par(usr = usr) lines(beav1$hours, beav1$activ, type = "s", lty = 2) plot(beav2$hours, beav2$temp, type = "l", xlab = "time", ylab = "temperature", main = "Beaver 2") usr <- par("usr"); usr[3:4] <- c(-0.2, 8); par(usr = usr) lines(beav2$hours, beav2$activ, type = "s", lty = 2) attach(beav2) temp2 <- ts(temp, start = 8+2/3, frequency = 6) activ2 <- ts(activ, start = 8+2/3, frequency = 6) acf(temp2[activ2 == 0]) acf(temp2[activ2 == 1]) # also look at PACFs acf(temp2[activ2 == 0], type = "partial") acf(temp2[activ2 == 1], type = "partial") ar(temp2[activ2 == 0]) ar(temp2[activ2 == 1]) par(mfrow = c(1, 1)) detach() rm(temp2, activ2) library(nlme) beav2.gls <- gls(temp ~ activ, data = beav2, corr = corAR1(0.8), method = "ML") summary(beav2.gls) summary(update(beav2.gls, subset = 6:100)) 
arima(beav2$temp, c(1,0,0), xreg = beav2$activ) attach(beav1) temp1 <- ts(c(temp[1:82], NA, temp[83:114]), start = 9.5, frequency = 6) activ1 <- ts(c(activ[1:82], NA, activ[83:114]), start = 9.5, frequency = 6) acf(temp1[1:53]) acf(temp1[1:53], type = "partial") ar(temp1[1:53]) act <- c(rep(0, 10), activ1) beav1b <- data.frame(Time = time(temp1), temp = as.vector(temp1), act = act[11:125], act1 = act[10:124], act2 = act[9:123], act3 = act[8:122]) detach() rm(temp1, activ1) summary(gls(temp ~ act + act1 + act2 + act3, data = beav1b, na.action = na.omit, corr = corCAR1(0.82^6, ~Time), method = "ML")) arima(beav1b$temp, c(1, 0, 0), xreg = beav1b[, 3:6]) # 14.6 Models for financial time series plot(SP500, type = "l", xlab = "", ylab = "returns (%)", xaxt = "n", las = 1) axis(1, at = c(0, 254, 507, 761, 1014, 1266, 1518, 1772, 2025, 2277, 2529, 2781), lab = 1990:2001) plot(density(SP500, width = "sj", n = 256), type = "l", xlab = "", ylab = "") par(pty = "s") qqnorm(SP500) qqline(SP500) if(FALSE) { module(garch) summary(garch(SP500 ~ 1, ~garch(1,1))) fit <- garch(SP500 ~ 1, ~garch(1,1), cond.dist = "t") summary(fit) plot(fit) summary(garch(SP500 ~ 1, ~egarch(1,1), cond.dist = "t", leverage = TRUE)) } if(require(tseries)) print(summary(garch(x = SP500 - median(SP500), order = c(1, 1)))) # End of ch14 MASS/inst/scripts/ch01.R0000644000176000001440000000416311754561330014400 0ustar ripleyusers#-*- R -*- ## Script from Fourth Edition of `Modern Applied Statistics with S' options(width=65, digits=5, height=9999) pdf(file="ch01.pdf", width=8, height=6, pointsize=9) # Chapter 1 Introduction # 1.1 A quick overview of S 2 + 3 sqrt(3/4)/(1/3 - 2/pi^2) library(MASS) mean(chem) m <- mean(chem); v <- var(chem)/length(chem) m/sqrt(v) std.dev <- function(x) sqrt(var(x)) t.test.p <- function(x, mu=0) { n <- length(x) t <- sqrt(n) * (mean(x) - mu) / std.dev(x) 2 * (1 - pt(abs(t), n - 1)) } t.stat <- function(x, mu = 0) { n <- length(x) t <- sqrt(n) * (mean(x) - mu) / std.dev(x) list(t = 
t, p = 2 * (1 - pt(abs(t), n - 1))) } z <- rnorm(300, 1, 2) # generate 300 N(1, 4) variables. t.stat(z) unlist(t.stat(z, 1)) # test mu=1, compact result # 1.4 An introductory session x <- rnorm(1000) y <- rnorm(1000) truehist(c(x,y+3), nbins=25) # ?truehist contour(dd <- kde2d(x,y)) image(dd) x <- seq(1, 20, 0.5) x w <- 1 + x/2 y <- x + w*rnorm(x) dum <- data.frame(x, y, w) dum rm(x, y, w) fm <- lm(y ~ x, data=dum) summary(fm) fm1 <- lm(y ~ x, data = dum, weight = 1/w^2) summary(fm1) lrf <- loess(y ~ x, dum) attach(dum) plot(x, y) lines(spline(x, fitted(lrf)), col = 2) abline(0, 1, lty = 3, col = 3) abline(fm, col = 4) abline(fm1, lty = 4, col = 5) plot(fitted(fm), resid(fm), xlab = "Fitted Values", ylab = "Residuals") qqnorm(resid(fm)) qqline(resid(fm)) detach() rm(fm,fm1,lrf,dum) hills # S: splom(~ hills) pairs(hills) # S: if(interactive()) brush(hills) attach(hills) plot(dist, time) if(interactive()) identify(dist, time, row.names(hills)) abline(lm(time ~ dist)) # library(lqs) abline(lqs(time ~ dist), lty=3, col=4) detach() if(interactive()){ plot(c(0,1), c(0,1), type="n") xy <- locator(type = "p") abline(lm(y ~ x, xy), col = 4) abline(rlm(y ~ x, xy, method = "MM"), lty = 3, col = 3) abline(lqs(y ~ x, xy), lty = 2, col = 2) rm(xy) } attach(michelson) search() plot(Expt, Speed, main="Speed of Light Data", xlab="Experiment No.") fm <- aov(Speed ~ Run + Expt) summary(fm) fm0 <- update(fm, . ~ . 
- Run) anova(fm0, fm) detach() rm(fm, fm0) 1 - pf(4.3781, 4, 76) qf(0.95, 4, 76) # End of ch01 MASS/inst/scripts/ch15.R0000644000176000001440000001406411754561330014406 0ustar ripleyusers#-*- R -*- ## Script from Fourth Edition of `Modern Applied Statistics with S' # Chapter 15 Spatial Statistics library(MASS) pdf(file="ch15.pdf", width=8, height=8, pointsize=9) options(width=65, digits=5) library(spatial) # 15.1 Spatial interpolation and smoothing par(mfrow=c(2,2), pty = "s") topo.ls <- surf.ls(2, topo) trsurf <- trmat(topo.ls, 0, 6.5, 0, 6.5, 30) eqscplot(trsurf, , xlab = "", ylab = "", type = "n") contour(trsurf, levels = seq(600, 1000, 25), add = TRUE) points(topo) title("Degree=2") topo.ls <- surf.ls(3, topo) trsurf <- trmat(topo.ls, 0, 6.5, 0, 6.5, 30) eqscplot(trsurf, , xlab = "", ylab = "", type = "n") contour(trsurf, levels = seq(600, 1000, 25), add = TRUE) points(topo) title("Degree=3") topo.ls <- surf.ls(4, topo) trsurf <- trmat(topo.ls, 0, 6.5, 0, 6.5, 30) eqscplot(trsurf, , xlab = "", ylab = "", type = "n") contour(trsurf, levels = seq(600, 1000, 25), add = TRUE) points(topo) title("Degree=4") topo.ls <- surf.ls(6, topo) trsurf <- trmat(topo.ls, 0, 6.5, 0, 6.5, 30) eqscplot(trsurf, , xlab = "", ylab = "", type = "n") contour(trsurf, levels = seq(600, 1000, 25), add = TRUE) points(topo) title("Degree=6") library(lattice) topo.ls <- surf.ls(4, topo) trsurf <- trmat(topo.ls, 0, 6.5, 0, 6.5, 30) trsurf[c("x", "y")] <- expand.grid(x=trsurf$x, y=trsurf$y) plt1 <- levelplot(z ~ x * y, trsurf, aspect=1, at = seq(650, 1000, 10), xlab = "", ylab = "") plt2 <- wireframe(z ~ x * y, trsurf, aspect=c(1, 0.5), screen = list(z = -30, x = -60)) print(plt1, position = c(0, 0, 0.5, 1), more=TRUE) print(plt2, position = c(0.45, 0, 1, 1)) par(mfcol = c(2, 2), pty = "s") topo.loess <- loess(z ~ x * y, topo, degree = 2, span = 0.25, normalize = FALSE) topo.mar <- list(x = seq(0, 6.5, 0.1), y = seq(0, 6.5, 0.1)) topo.lo <- predict(topo.loess, expand.grid(topo.mar), se = TRUE) 
eqscplot(topo.mar, xlab = "fit", ylab = "", type = "n") contour(topo.mar$x, topo.mar$y, topo.lo$fit, levels = seq(700, 1000, 25), add = TRUE) points(topo) eqscplot(topo.mar, xlab = "standard error", ylab = "", type = "n") contour(topo.mar$x,topo.mar$y,topo.lo$se.fit, levels = seq(5, 25, 5), add = TRUE) title("Loess degree = 2") points(topo) topo.loess <- loess(z ~ x * y, topo, degree = 1, span = 0.25, normalize = FALSE) topo.lo <- predict(topo.loess, expand.grid(topo.mar), se=TRUE) eqscplot(topo.mar, xlab = "fit", ylab = "", type = "n") contour(topo.mar$x,topo.mar$y,topo.lo$fit, levels = seq(700, 1000, 25), add = TRUE) points(topo) eqscplot(topo.mar, xlab = "standard error", ylab = "", type = "n") contour(topo.mar$x,topo.mar$y,topo.lo$se.fit, levels = seq(5, 25, 5), add = TRUE) title("Loess degree = 1") points(topo) library(akima) par(mfrow = c(1, 2), pty= "s") topo.int <- interp.old(topo$x, topo$y, topo$z) eqscplot(topo.int, xlab = "interp default", ylab = "", type = "n") contour(topo.int, levels = seq(600, 1000, 25), add = TRUE) points(topo) topo.mar <- list(x = seq(0, 6.5, 0.1), y = seq(0, 6.5, 0.1)) topo.int2 <- interp.old(topo$x, topo$y, topo$z, topo.mar$x, topo.mar$y, ncp = 4, extrap = TRUE) eqscplot(topo.int2, xlab = "interp", ylab = "", type = "n") contour(topo.int2, levels = seq(600, 1000, 25), add = TRUE) points(topo) # 15.2 Kriging par(mfrow = c(2, 2), pty = "s") topo.ls <- surf.ls(2, topo) trsurf <- trmat(topo.ls, 0, 6.5, 0, 6.5, 30) eqscplot(trsurf, , xlab = "", ylab = "", type = "n") contour(trsurf, levels = seq(600, 1000, 25), add = TRUE) points(topo) title("LS trend surface") topo.gls <- surf.gls(2, expcov, topo, d = 0.7) trsurf <- trmat(topo.gls, 0, 6.5, 0, 6.5, 30) eqscplot(trsurf, , xlab = "", ylab = "", type = "n") contour(trsurf, levels = seq(600, 1000, 25), add = TRUE) points(topo) title("GLS trend surface") prsurf <- prmat(topo.gls, 0, 6.5, 0, 6.5, 50) eqscplot(prsurf, , xlab = "", ylab = "", type = "n") contour(prsurf, levels = seq(600, 
1000, 25), add = TRUE) points(topo) title("Kriging prediction") sesurf <- semat(topo.gls, 0, 6.5, 0, 6.5, 30) eqscplot(sesurf, , xlab = "", ylab = "", type = "n") contour(sesurf, levels = c(20, 25), add = TRUE) points(topo) title("Kriging s.e.") par(mfrow = c(2, 2), pty = "m") topo.kr <- surf.ls(2, topo) correlogram(topo.kr, 25) d <- seq(0, 7, 0.1) lines(d, expcov(d, 0.7)) variogram(topo.kr, 25) ## left panel of Figure 15.7 topo.kr <- surf.gls(2, expcov, topo, d=0.7) correlogram(topo.kr, 25) lines(d, expcov(d, 0.7)) lines(d, gaucov(d, 1.0, 0.3), lty = 3) # try nugget effect ## right panel topo.kr <- surf.ls(0, topo) correlogram(topo.kr, 25) lines(d, gaucov(d, 2, 0.05)) par(mfrow = c(2, 2), pty = "s") ## top row of Figure 15.8 topo.kr <- surf.gls(2, gaucov, topo, d = 1, alph = 0.3) prsurf <- prmat(topo.kr, 0, 6.5, 0, 6.5, 50) eqscplot(prsurf, , xlab = "fit", ylab = "", type = "n") contour(prsurf, levels = seq(600, 1000, 25), add = TRUE) points(topo) sesurf <- semat(topo.kr, 0, 6.5, 0, 6.5, 25) eqscplot(sesurf, , xlab = "standard error", ylab = "", type = "n") contour(sesurf, levels = c(15, 20, 25), add = TRUE) points(topo) ## bottom row of Figure 15.8 topo.kr <- surf.gls(0, gaucov, topo, d = 2, alph = 0.05, nx = 10000) prsurf <- prmat(topo.kr, 0, 6.5, 0, 6.5, 50) eqscplot(prsurf, , xlab = "fit", ylab = "", type = "n") contour(prsurf, levels = seq(600, 1000, 25), add = TRUE) points(topo) sesurf <- semat(topo.kr, 0, 6.5, 0, 6.5, 25) eqscplot(sesurf, , xlab = "standard error", ylab = "", type = "n") contour(sesurf, levels = c(15, 20, 25), add = TRUE) points(topo) # 15.3 Point process analysis library(spatial) pines <- ppinit("pines.dat") par(mfrow = c(2, 2), pty = "s") plot(pines, xlim = c(0, 10), ylim = c(0, 10), xlab = "", ylab = "", xaxs = "i", yaxs = "i") plot(Kfn(pines,5), type = "s", xlab = "distance", ylab = "L(t)") lims <- Kenvl(5, 100, Psim(72)) lines(lims$x, lims$l, lty = 2) lines(lims$x, lims$u, lty = 2) ppregion(pines) plot(Kfn(pines, 1.5), type = "s", xlab 
= "distance", ylab = "L(t)") lims <- Kenvl(1.5, 100, Strauss(72, 0.2, 0.7)) lines(lims$x, lims$a, lty = 2) lines(lims$x, lims$l, lty = 2) lines(lims$x, lims$u, lty = 2) pplik(pines, 0.7) lines(Kaver(1.5, 100, Strauss(72, 0.15, 0.7)), lty = 3) # End of ch15 MASS/inst/scripts/ch02.R0000644000176000001440000000745311754561330014406 0ustar ripleyusers#-*- R -*- ## Script from Fourth Edition of `Modern Applied Statistics with S' # Chapter 2 Data Manipulation library(MASS) options(width=65, digits=5, height=9999) -2:2 powers.of.pi <- pi^(-2:2) powers.of.pi class(powers.of.pi) print(powers.of.pi) summary(powers.of.pi) # rm(powers.of.pi) powers.of.pi[5] names(powers.of.pi) <- -2:2 powers.of.pi powers.of.pi["2"] class(powers.of.pi) as.vector(powers.of.pi) names(powers.of.pi) <- NULL powers.of.pi citizen <- factor(c("uk", "us", "no", "au", "uk", "us", "us")) citizen unclass(citizen) citizen[5:7] citizen <- factor(c("uk", "us", "no", "au", "uk", "us", "us"), levels = c("us", "fr", "no", "au", "uk")) citizen income <- ordered(c("Mid", "Hi", "Lo", "Mid", "Lo", "Hi", "Lo")) income as.numeric(income) inc <- ordered(c("Mid", "Hi", "Lo", "Mid", "Lo", "Hi", "Lo"), levels = c("Lo", "Mid", "Hi")) inc erupt <- cut(geyser$duration, breaks = 0:6) erupt <- ordered(erupt, labels=levels(erupt)) erupt painters row.names(painters) summary(painters) # try it! 
attach(painters) School detach("painters") mymat <- matrix(1:30, 3, 10) mymat myarr <- mymat dim(myarr) <- c(3, 5, 2) class(myarr) myarr dim(myarr) dimnames(myarr) <- list(letters[1:3], NULL, c("(i)", "(ii)")) myarr newvar <- NA class(NA) newvar > 3 x <- c(pi, 4, 5) x[2] <- NA x class(x) is.na(x) 1/0 x <- c(-1, 0, 1)/0 x is.na(x) x > Inf x <- c(2.9, 3.1, 3.4, 3.4, 3.7, 3.7, 2.8, 2.5) letters[1:3] letters[c(1:3,3:1)] longitude <- state.center$x names(longitude) <- state.name longitude[c("Hawaii", "Alaska")] myarr[1, 2:4, ] myarr[1, 2:4, , drop = FALSE] attach(painters) painters[Colour >= 17, ] painters[Colour >= 15 & Composition > 10, ] painters[Colour >= 15 & School != "D", ] painters[is.element(School, c("A", "B", "D")), ] painters[School %in% c("A", "B", "D"), ] ## R only painters[cbind(1:nrow(painters), ifelse(Colour > Expression, 3, 4))] painters[grep("io$", row.names(painters)), ] detach("painters") m <- 30 fglsub1 <- fgl[sort(sample(1:nrow(fgl), m)), ] fglsub2 <- fgl[rbinom(nrow(fgl), 1, 0.1) == 1, ] fglsub3 <- fgl[seq(1, nrow(fgl), by = 10), ] painters[sort.list(row.names(painters)), ] lcrabs <- crabs # make a copy lcrabs[, 4:8] <- log(crabs[, 4:8]) scrabs <- crabs # make a copy scrabs[, 4:8] <- lapply(scrabs[, 4:8], scale) ## or to just centre the variables scrabs[, 4:8] <- lapply(scrabs[, 4:8], scale, scale = FALSE) scrabs <- crabs # make a copy scrabs[ ] <- lapply(scrabs, function(x) {if(is.numeric(x)) scale(x) else x}) sapply(crabs, is.numeric) by(crabs[, 4:8], list(crabs$sp, crabs$sex), summary) aggregate(crabs[, 4:8], by = list(sp=crabs$sp, sex=crabs$sex), median) authors <- data.frame( surname = c("Tukey", "Venables", "Tierney", "Ripley", "McNeil"), nationality = c("US", "Australia", "US", "UK", "Australia"), deceased = c("yes", rep("no", 4))) books <- data.frame( name = c("Tukey", "Venables", "Tierney", "Ripley", "Ripley", "McNeil", "R Core"), title = c("Exploratory Data Analysis", "Modern Applied Statistics ...", "LISP-STAT", "Spatial Statistics", 
"Stochastic Simulation", "Interactive Data Analysis", "An Introduction to R")) authors books merge(authors, books, by.x = "surname", by.y = "name") attach(quine) table(Age) table(Sex, Age) tab <- xtabs(~ Sex + Age, quine) unclass(tab) tapply(Days, Age, mean) tapply(Days, Age, mean, trim = 0.1) tapply(Days, list(Sex, Age), mean) tapply(Days, list(Sex, Age), function(x) sqrt(var(x)/length(x))) quineFO <- quine[sapply(quine, is.factor)] #tab <- do.call("table", quineFO) tab <- table(quineFO) QuineF <- expand.grid(lapply(quineFO, levels)) QuineF$Freq <- as.vector(tab) QuineF # End of ch02 MASS/inst/scripts/ch13.R0000644000176000001440000002452711754561330014411 0ustar ripleyusers#-*- R -*- ## Script from Fourth Edition of `Modern Applied Statistics with S' # Chapter 13 Survival Analysis library(MASS) options(width=65, digits=5, height=9999) options(contrasts=c("contr.treatment", "contr.poly")) pdf("ch13.pdf", width=8, height=6, pointsize=9) library(survival) # 13.1 Estimators of survivor curves plot(survfit(Surv(time) ~ ag, data=leuk), lty = 2:3, col = 2:3) legend(80, 0.8, c("ag absent", "ag present"), lty = 2:3, col = 2:3) attach(gehan) Surv(time, cens) plot(log(time) ~ pair) # product-limit estimators with Greenwood's formula for errors: gehan.surv <- survfit(Surv(time, cens) ~ treat, data = gehan, conf.type = "log-log") summary(gehan.surv) plot(gehan.surv, conf.int = TRUE, lty = 3:2, log = TRUE, xlab = "time of remission (weeks)", ylab = "survival") lines(gehan.surv, lty = 3:2, lwd = 2, cex = 2) legend(25, 0.1 , c("control", "6-MP"), lty = 2:3, lwd = 2) detach() survdiff(Surv(time, cens) ~ treat, data = gehan) survdiff(Surv(time) ~ ag, data = leuk) # 13.2 Parametric models plot(gehan.surv, lty = 3:4, col = 2:3, fun = "cloglog", xlab = "time of remission (weeks)", ylab = "log H(t)") legend(2, 0.5, c("control","6-MP"), lty = 4:3, col = 3:2) survreg(Surv(time) ~ ag*log(wbc), leuk, dist = "exponential") summary(survreg(Surv(time) ~ ag + log(wbc), leuk, dist = 
"exponential")) summary(survreg(Surv(time) ~ ag + log(wbc), leuk)) # Weibull summary(survreg(Surv(time) ~ ag + log(wbc), leuk, dist="loglogistic")) anova(survreg(Surv(time) ~ log(wbc), data = leuk), survreg(Surv(time) ~ ag + log(wbc), data = leuk)) summary(survreg(Surv(time) ~ strata(ag) + log(wbc), data=leuk)) leuk.wei <- survreg(Surv(time) ~ ag + log(wbc), leuk) ntimes <- leuk$time * exp(-leuk.wei$linear.predictors) plot(survfit(Surv(ntimes) ~ 1), log = TRUE) survreg(Surv(time, cens) ~ factor(pair) + treat, gehan, dist = "exponential") summary(survreg(Surv(time, cens) ~ treat, gehan, dist = "exponential")) summary(survreg(Surv(time, cens) ~ treat, gehan)) plot(survfit(Surv(time, cens) ~ factor(temp), motors), conf.int = FALSE) motor.wei <- survreg(Surv(time, cens) ~ temp, motors) summary(motor.wei) unlist(predict(motor.wei, data.frame(temp=130), se.fit = TRUE)) predict(motor.wei, data.frame(temp=130), type = "quantile", p = c(0.5, 0.1)) t1 <- predict(motor.wei, data.frame(temp=130), type = "uquantile", p = 0.5, se = TRUE) exp(c(LL=t1$fit - 2*t1$se, UL=t1$fit + 2*t1$se)) t1 <- predict(motor.wei, data.frame(temp=130), type = "uquantile", p = 0.1, se = TRUE) exp(c(LL=t1$fit - 2*t1$se, UL=t1$fit + 2*t1$se)) # summary(censorReg(censor(time, cens) ~ treat, gehan)) # 13.3 Cox proportional hazards model attach(leuk) leuk.cox <- coxph(Surv(time) ~ ag + log(wbc), data = leuk) summary(leuk.cox) update(leuk.cox, ~ . -ag) (leuk.coxs <- coxph(Surv(time) ~ strata(ag) + log(wbc), data = leuk)) (leuk.coxs1 <- update(leuk.coxs, . ~ . 
+ ag:log(wbc))) plot(survfit(Surv(time) ~ ag), lty = 2:3, log = TRUE) lines(survfit(leuk.coxs), lty = 2:3, lwd = 3) legend(80, 0.8, c("ag absent", "ag present"), lty = 2:3) leuk.cox <- coxph(Surv(time) ~ ag, leuk) detach() gehan.cox <- coxph(Surv(time, cens) ~ treat, gehan, method = "exact") summary(gehan.cox) # The next fit is slow coxph(Surv(time, cens) ~ treat + factor(pair), gehan, method = "exact") 1 - pchisq(45.5 - 16.2, 20) (motor.cox <- coxph(Surv(time, cens) ~ temp, motors)) coxph(Surv(time, cens) ~ temp, motors, method = "breslow") coxph(Surv(time, cens) ~ temp, motors, method = "exact") plot( survfit(motor.cox, newdata=data.frame(temp=200), conf.type = "log-log") ) summary( survfit(motor.cox, newdata = data.frame(temp=130)) ) # 13.4 Further examples # VA.temp <- as.data.frame(cancer.vet) # dimnames(VA.temp)[[2]] <- c("treat", "cell", "stime", # "status", "Karn", "diag.time","age","therapy") # attach(VA.temp) # VA <- data.frame(stime, status, treat = factor(treat), age, # Karn, diag.time, cell = factor(cell), prior = factor(therapy)) # detach(VA.temp) (VA.cox <- coxph(Surv(stime, status) ~ treat + age + Karn + diag.time + cell + prior, data = VA)) (VA.coxs <- coxph(Surv(stime, status) ~ treat + age + Karn + diag.time + strata(cell) + prior, data = VA)) par(mfrow=c(1,2), pty="s") plot(survfit(VA.coxs), log = TRUE, lty = 1:4, col = 2:5) #legend(locator(1), c("squamous", "small", "adeno", "large"), lty = 1:4, col = 2:5) plot(survfit(VA.coxs), fun = "cloglog", lty = 1:4, col = 2:5) cKarn <- factor(cut(VA$Karn, 5)) VA.cox1 <- coxph(Surv(stime, status) ~ strata(cKarn) + cell, data = VA) plot(survfit(VA.cox1), fun="cloglog") VA.cox2 <- coxph(Surv(stime, status) ~ Karn + strata(cell), data = VA) scatter.smooth(VA$Karn, residuals(VA.cox2)) VA.wei <- survreg(Surv(stime, status) ~ treat + age + Karn + diag.time + cell + prior, data = VA) summary(VA.wei, cor = FALSE) VA.exp <- survreg(Surv(stime, status) ~ Karn + cell, data = VA, dist = "exponential") summary(VA.exp, 
cor = FALSE) cox.zph(VA.coxs) par(mfrow = c(3, 2), pty="m"); plot(cox.zph(VA.coxs)) VA2 <- VA ## needed because VA and stepAIC are both in MASS VA2$Karnc <- VA2$Karn - 50 VA.coxc <- update(VA.cox, ~ . - Karn + Karnc, data=VA2) VA.cox2 <- stepAIC(VA.coxc, ~ .^2) VA.cox2$anova (VA.cox3 <- update(VA.cox2, ~ treat/Karnc + prior*Karnc + treat:prior + cell/diag.time)) cox.zph(VA.cox3) par(mfrow = c(2, 2)) plot(cox.zph(VA.cox3), var = c(1, 3, 7)) par(mfrow = c(1, 1)) #data(heart) # in package survival coxph(Surv(start, stop, event) ~ transplant* (age + surgery + year), data = heart) (stan <- coxph(Surv(start, stop, event) ~ transplant*year + age + surgery, data = heart)) stan1 <- coxph(Surv(start, stop, event) ~ strata(transplant) + year + year:transplant + age + surgery, heart) par(mfrow=c(1,2), pty="s") plot(survfit(stan1), conf.int = TRUE, log = TRUE, lty = c(1, 3), col = 2:3) #legend(locator(1), c("before", "after"), lty = c(1, 3), col= 2:3) attach(heart) plot(year[transplant==0], residuals(stan1, collapse = id), xlab = "year", ylab = "martingale residual") lines(lowess(year[transplant == 0], residuals(stan1, collapse = id))) par(mfrow = c(1,1), pty = "m") sresid <- resid(stan1, type = "dfbeta", collapse = id) detach() -100 * sresid %*% diag(1/stan1$coef) # Survivor curve for the "average" subject summary(survfit(stan)) # follow-up for two years stan2 <- data.frame(start = c(0, 183), stop= c(183, 2*365), event = c(0, 0), year = c(4, 4), age = c(50, 50) - 48, surgery = c(1, 1), transplant = as.factor(c(0, 1))) summary(survfit(stan, stan2, individual = TRUE, conf.type = "log-log")) # Aids analysis time.depend.covar <- function(data) { id <- row.names(data); n <- length(id) events <- c(0, 10043, 11139, 12053) # julian days crit1 <- matrix(events[1:3], n, 3 ,byrow = TRUE) crit2 <- matrix(events[2:4], n, 3, byrow = TRUE) diag <- matrix(data$diag,n,3); death <- matrix(data$death,n,3) incid <- (diag < crit2) & (death >= crit1); incid <- t(incid) indr <- col(incid)[incid]; 
indc <- row(incid)[incid] ind <- cbind(indr, indc); idno <- id[indr] state <- data$state[indr]; T.categ <- data$T.categ[indr] age <- data$age[indr]; sex <- data$sex[indr] late <- indc - 1 start <- t(pmax(crit1 - diag, 0))[incid] stop <- t(pmin(crit2, death + 0.9) - diag)[incid] status <- matrix(as.numeric(data$status),n,3)-1 # 0/1 status[death > crit2] <- 0; status <- status[ind] levels(state) <- c("NSW", "Other", "QLD", "VIC") levels(T.categ) <- c("hs", "hsid", "id", "het", "haem", "blood", "mother", "other") levels(sex) <- c("F", "M") data.frame(idno, zid=factor(late), start, stop, status, state, T.categ, age, sex) } Aids3 <- time.depend.covar(Aids2) attach(Aids3) aids.cox <- coxph(Surv(start, stop, status) ~ zid + state + T.categ + sex + age, data = Aids3) summary(aids.cox) aids1.cox <- coxph(Surv(start, stop, status) ~ zid + strata(state) + T.categ + age, data = Aids3) (aids1.surv <- survfit(aids1.cox)) plot(aids1.surv, mark.time = FALSE, lty = 1:4, col = 2:5, xscale = 365.25/12, xlab = "months since diagnosis") #legend(locator(1), levels(state), lty = 1:4, col = 2:5) aids2.cox <- coxph(Surv(start, stop, status) ~ zid + state + strata(T.categ) + age, data = Aids3) (aids2.surv <- survfit(aids2.cox)) par(mfrow = c(1, 2), pty="s") plot(aids2.surv[1:4], mark.time = FALSE, lty = 1:4, col = 2:5, xscale=365.25/12, xlab="months since diagnosis") #legend(locator(1), levels(T.categ)[1:4], lty = 1:4, col = 2:5) plot(aids2.surv[c(1, 5, 6, 8)], mark.time = FALSE, lty = 1:4, col = 2:5, xscale=365.25/12, xlab="months since diagnosis") #legend(locator(1), levels(T.categ)[c(1, 5, 6, 8)], lty = 1:4, col = 2:5) par(mfrow=c(1,1), pty="m") cases <- diff(c(0,idno)) != 0 aids.res <- residuals(aids.cox, collapse = idno) scatter.smooth(age[cases], aids.res, xlab = "age", ylab="martingale residual") age2 <- cut(age, c(-1, 15, 30, 40, 50, 60, 100)) c.age <- factor(as.numeric(age2), labels = c("0-15", "16-30", "31-40", "41-50", "51-60", "61+")) table(c.age) c.age <- relevel(c.age, 
"31-40") summary(coxph(Surv(start, stop, status) ~ zid + state + T.categ + age + c.age, data = Aids3)) detach() make.aidsp <- function(){ cutoff <- 10043 btime <- pmin(cutoff, Aids2$death) - pmin(cutoff, Aids2$diag) atime <- pmax(cutoff, Aids2$death) - pmax(cutoff, Aids2$diag) survtime <- btime + 0.5*atime status <- as.numeric(Aids2$status) data.frame(survtime, status = status - 1, state = Aids2$state, T.categ = Aids2$T.categ, age = Aids2$age, sex = Aids2$sex) } Aidsp <- make.aidsp() aids.wei <- survreg(Surv(survtime + 0.9, status) ~ state + T.categ + sex + age, data = Aidsp) summary(aids.wei, cor = FALSE) survreg(Surv(survtime + 0.9, status) ~ state + T.categ + age, data = Aidsp) (aids.ps <- survreg(Surv(survtime + 0.9, status) ~ state + T.categ + pspline(age, df=6), data = Aidsp)) zz <- predict(aids.ps, data.frame( state = factor(rep("NSW", 83), levels = levels(Aidsp$state)), T.categ = factor(rep("hs", 83), levels = levels(Aidsp$T.categ)), age = 0:82), se = T, type = "linear") plot(0:82, exp(zz$fit)/365.25, type = "l", ylim = c(0, 2), xlab = "age", ylab = "expected lifetime (years)") lines(0:82, exp(zz$fit+1.96*zz$se.fit)/365.25, lty = 3, col = 2) lines(0:82, exp(zz$fit-1.96*zz$se.fit)/365.25, lty = 3, col = 2) rug(Aidsp$age+runif(length(Aidsp$age), -0.5, 0.5), ticksize = 0.015) # End of ch13 MASS/inst/scripts/ch06.R0000644000176000001440000002516411754561330014411 0ustar ripleyusers#-*- R -*- ## Script from Fourth Edition of `Modern Applied Statistics with S' # Chapter 6 Linear Statistical Models library(MASS) library(lattice) options(width=65, digits=5, height=9999) pdf(file="ch06.pdf", width=8, height=6, pointsize=9) options(contrasts = c("contr.helmert", "contr.poly")) # 6.1 A linear regression example xyplot(Gas ~ Temp | Insul, whiteside, panel = function(x, y, ...) { panel.xyplot(x, y, ...) panel.lmline(x, y, ...) }, xlab = "Average external temperature (deg. C)", ylab = "Gas consumption (1000 cubic feet)", aspect = "xy", strip = function(...) 
strip.default(..., style = 1)) gasB <- lm(Gas ~ Temp, data = whiteside, subset = Insul=="Before") gasA <- update(gasB, subset = Insul=="After") summary(gasB) summary(gasA) varB <- deviance(gasB)/gasB$df.resid # direct calculation varB <- summary(gasB)$sigma^2 # alternative gasBA <- lm(Gas ~ Insul/Temp - 1, data = whiteside) summary(gasBA) gasQ <- lm(Gas ~ Insul/(Temp + I(Temp^2)) - 1, data = whiteside) summary(gasQ)$coef # R: options(contrasts = c("contr.helmert", "contr.poly")) gasPR <- lm(Gas ~ Insul + Temp, data = whiteside) anova(gasPR, gasBA) oldcon <- options(contrasts = c("contr.treatment", "contr.poly")) gasBA1 <- lm(Gas ~ Insul*Temp, data = whiteside) summary(gasBA1)$coef options(oldcon) # 6.2 Model formulae and model matrices dat <- data.frame(a = factor(rep(1:3, 3)), y = rnorm(9, rep(2:4, 3), 0.1)) obj <- lm(y ~ a, dat) (alf.star <- coef(obj)) Ca <- contrasts(dat$a) # contrast matrix for `a' drop(Ca %*% alf.star[-1]) dummy.coef(obj) N <- factor(Nlevs <- c(0,1,2,4)) contrasts(N) contrasts(ordered(N)) N2 <- N contrasts(N2, 2) <- poly(Nlevs, 2) N2 <- C(N, poly(Nlevs, 2), 2) # alternative contrasts(N2) fractions(ginv(contr.helmert(n = 4))) Cp <- diag(-1, 4, 5); Cp[row(Cp) == col(Cp) - 1] <- 1 Cp fractions(ginv(Cp)) # 6.3 Regression diagnostics (hills.lm <- lm(time ~ dist + climb, data = hills)) frame() par(fig = c(0, 0.6, 0, 0.55)) plot(fitted(hills.lm), studres(hills.lm)) abline(h = 0, lty = 2) # identify(fitted(hills.lm), studres(hills.lm), row.names(hills)) par(fig = c(0.6, 1, 0, 0.55), pty = "s") qqnorm(studres(hills.lm)) qqline(studres(hills.lm)) par(pty = "m") hills.hat <- lm.influence(hills.lm)$hat cbind(hills, lev = hills.hat)[hills.hat > 3/35, ] cbind(hills, pred = predict(hills.lm))["Knock Hill", ] (hills1.lm <- update(hills.lm, subset = -18)) update(hills.lm, subset = -c(7, 18)) summary(hills1.lm) summary(update(hills1.lm, weights = 1/dist^2)) lm(time ~ -1 + dist + climb, hills[-18, ], weight = 1/dist^2) # hills <- hills # make a local copy 
(needed in S-PLUS) hills$ispeed <- hills$time/hills$dist hills$grad <- hills$climb/hills$dist (hills2.lm <- lm(ispeed ~ grad, data = hills[-18, ])) frame() par(fig = c(0, 0.6, 0, 0.55)) plot(hills$grad[-18], studres(hills2.lm), xlab = "grad") abline(h = 0, lty = 2) # identify(hills$grad[-18], studres(hills2.lm), row.names(hills)[-18]) par(fig = c(0.6, 1, 0, 0.55), pty = "s") qqnorm(studres(hills2.lm)) qqline(studres(hills2.lm)) par(pty = "m") hills2.hat <- lm.influence(hills2.lm)$hat cbind(hills[-18,], lev = hills2.hat)[hills2.hat > 1.8*2/34, ] # 6.4 Safe prediction quad1 <- lm(Weight ~ Days + I(Days^2), data = wtloss) quad2 <- lm(Weight ~ poly(Days, 2), data = wtloss) new.x <- data.frame(Days = seq(250, 300, 10), row.names = seq(250, 300, 10)) predict(quad1, newdata = new.x) predict(quad2, newdata = new.x) # predict.gam(quad2, newdata = new.x) # S-PLUS only # 6.5 Robust and resistant regression # library(lqs) phones.lm <- lm(calls ~ year, data = phones) attach(phones); plot(year, calls); detach() abline(phones.lm$coef) abline(rlm(calls ~ year, phones, maxit=50), lty = 2, col = 2) abline(lqs(calls ~ year, phones), lty =3, col = 3) # legend(locator(1), lty = 1:3, col = 1:3, # legend = c("least squares", "M-estimate", "LTS")) ## cor = FALSE is the default in R summary(lm(calls ~ year, data = phones)) summary(rlm(calls ~ year, maxit = 50, data = phones)) summary(rlm(calls ~ year, scale.est = "proposal 2", data = phones)) summary(rlm(calls ~ year, data = phones, psi = psi.bisquare)) lqs(calls ~ year, data = phones) lqs(calls ~ year, data = phones, method = "lms") lqs(calls ~ year, data = phones, method = "S") summary(rlm(calls ~ year, data = phones, method = "MM")) # library(robust) # S-PLUS only # phones.lmr <- lmRob(calls ~ year, data = phones) # summary(phones.lmr) # plot(phones.lmr) hills.lm hills1.lm # omitting Knock Hill rlm(time ~ dist + climb, data = hills) summary(rlm(time ~ dist + climb, data = hills, weights = 1/dist^2, method = "MM")) lqs(time ~ dist + 
climb, data = hills, nsamp = "exact") summary(hills2.lm) # omitting Knock Hill summary(rlm(ispeed ~ grad, data = hills)) summary(rlm(ispeed ~ grad, data = hills, method="MM")) # summary(lmRob(ispeed ~ grad, data = hills)) lqs(ispeed ~ grad, data = hills) # 6.6 Bootstrapping linear models library(boot) fit <- lm(calls ~ year, data = phones) ph <- data.frame(phones, res = resid(fit), fitted = fitted(fit)) ph.fun <- function(data, i) { d <- data d$calls <- d$fitted + d$res[i] coef(update(fit, data=d)) } (ph.lm.boot <- boot(ph, ph.fun, R = 999)) fit <- rlm(calls ~ year, method = "MM", data = phones) ph <- data.frame(phones, res = resid(fit), fitted = fitted(fit)) (ph.rlm.boot <- boot(ph, ph.fun, R = 999)) # 6.7 Factorial designs and designed experiments options(contrasts=c("contr.helmert", "contr.poly")) (npk.aov <- aov(yield ~ block + N*P*K, data = npk)) summary(npk.aov) alias(npk.aov) coef(npk.aov) options(contrasts=c("contr.treatment", "contr.poly")) npk.aov1 <- aov(yield ~ block + N + K, data = npk) summary.lm(npk.aov1) se.contrast(npk.aov1, list(N == "0", N == "1"), data = npk) model.tables(npk.aov1, type = "means", se = TRUE) mp <- c("-", "+") (NPK <- expand.grid(N = mp, P = mp, K = mp)) if(FALSE) { ## fac.design is part of S-PLUS. 
blocks13 <- fac.design(levels = c(2, 2, 2), factor= list(N=mp, P=mp, K=mp), rep = 3, fraction = 1/2) blocks46 <- fac.design(levels = c(2, 2, 2), factor = list(N=mp, P=mp, K=mp), rep = 3, fraction = ~ -N:P:K) NPK <- design(block = factor(rep(1:6, each = 4)), rbind(blocks13, blocks46)) i <- order(runif(6)[NPK$block], runif(24)) NPK <- NPK[i,] # Randomized lev <- rep(2, 7) factors <- list(S=mp, D=mp, H=mp, G=mp, R=mp, B=mp, P=mp) (Bike <- fac.design(lev, factors, fraction = ~ S:D:G + S:H:R + D:H:B + S:D:H:P)) replications(~ .^2, data=Bike) } if(require("FrF2")) { NPK <- FrF2(8, factor.names = c("N", "P", "K"), default.levels = 0:1, blocks = 2, replications = 3) print(NPK) print(as.data.frame(NPK)) print(Bike <- FrF2(factor.names = c("S", "D", "H", "G", "R", "B", "P"), default.levels = c("+", "-"), resolution = 3)) print(replications(~ .^2, data=Bike)) } # 6.8 An unbalanced four-way layout attach(quine) table(Lrn, Age, Sex, Eth) Means <- tapply(Days, list(Eth, Sex, Age, Lrn), mean) Vars <- tapply(Days, list(Eth, Sex, Age, Lrn), var) SD <- sqrt(Vars) par(mfrow = c(1, 2), pty="s") plot(Means, Vars, xlab = "Cell Means", ylab = "Cell Variances") plot(Means, SD, xlab = "Cell Means", ylab = "Cell Std Devn.") detach() ## singular.ok = TRUE is the default in R boxcox(Days+1 ~ Eth*Sex*Age*Lrn, data = quine, singular.ok = TRUE, lambda = seq(-0.05, 0.45, len = 20)) logtrans(Days ~ Age*Sex*Eth*Lrn, data = quine, alpha = seq(0.75, 6.5, len = 20), singular.ok = TRUE) quine.hi <- aov(log(Days + 2.5) ~ .^4, quine) quine.nxt <- update(quine.hi, . ~ . - Eth:Sex:Age:Lrn) dropterm(quine.nxt, test = "F") quine.lo <- aov(log(Days+2.5) ~ 1, quine) addterm(quine.lo, quine.hi, test = "F") quine.stp <- stepAIC(quine.nxt, scope = list(upper = ~Eth*Sex*Age*Lrn, lower = ~1), trace = FALSE) quine.stp$anova dropterm(quine.stp, test = "F") quine.3 <- update(quine.stp, . ~ . - Eth:Age:Lrn) dropterm(quine.3, test = "F") quine.4 <- update(quine.3, . ~ . - Eth:Age) quine.5 <- update(quine.4, . ~ . 
- Age:Lrn) dropterm(quine.5, test = "F") # 6.9 Predicting computer performance par(mfrow = c(1, 2), pty = "s") boxcox(perf ~ syct + mmin + mmax + cach + chmin + chmax, data = cpus, lambda = seq(0, 1, 0.1)) cpus1 <- cpus attach(cpus) for(v in names(cpus)[2:7]) cpus1[[v]] <- cut(cpus[[v]], unique(quantile(cpus[[v]])), include.lowest = TRUE) detach() boxcox(perf ~ syct + mmin + mmax + cach + chmin + chmax, data = cpus1, lambda = seq(-0.25, 1, 0.1)) par(mfrow = c(1, 1), pty = "m") set.seed(123) cpus2 <- cpus[, 2:8] # excludes names, authors' predictions cpus2[, 1:3] <- log10(cpus2[, 1:3]) #cpus.samp <- sample(1:209, 100) cpus.samp <- c(3, 5, 6, 7, 8, 10, 11, 16, 20, 21, 22, 23, 24, 25, 29, 33, 39, 41, 44, 45, 46, 49, 57, 58, 62, 63, 65, 66, 68, 69, 73, 74, 75, 76, 78, 83, 86, 88, 98, 99, 100, 103, 107, 110, 112, 113, 115, 118, 119, 120, 122, 124, 125, 126, 127, 132, 136, 141, 144, 146, 147, 148, 149, 150, 151, 152, 154, 156, 157, 158, 159, 160, 161, 163, 166, 167, 169, 170, 173, 174, 175, 176, 177, 183, 184, 187, 188, 189, 194, 195, 196, 197, 198, 199, 202, 204, 205, 206, 208, 209) cpus.lm <- lm(log10(perf) ~ ., data = cpus2[cpus.samp, ]) test.cpus <- function(fit) sqrt(sum((log10(cpus2[-cpus.samp, "perf"]) - predict(fit, cpus2[-cpus.samp,]))^2)/109) test.cpus(cpus.lm) cpus.lm2 <- stepAIC(cpus.lm, trace=FALSE) cpus.lm2$anova test.cpus(cpus.lm2) # 6.10 Multiple comparisons immer.aov <- aov((Y1 + Y2)/2 ~ Var + Loc, data = immer) summary(immer.aov) model.tables(immer.aov, type = "means", se = TRUE, cterms = "Var") if(FALSE) { multicomp(immer.aov, plot = TRUE) oats1 <- aov(Y ~ N + V + B, data = oats) summary(oats1) multicomp(oats1, focus = "V") multicomp(oats1, focus = "N", comparisons = "mcc", control = 1) lmat <- matrix(c(0,-1,1,rep(0, 11), 0,0,-1,1, rep(0,10), 0,0,0,-1,1,rep(0,9)),,3, dimnames = list(NULL, c("0.2cwt-0.0cwt", "0.4cwt-0.2cwt", "0.6cwt-0.4cwt"))) multicomp(oats1, lmat = lmat, bounds = "lower", comparisons = "none") } (tk <- TukeyHSD(immer.aov, which = 
"Var")) plot(tk) oats1 <- aov(Y ~ N + V + B, data = oats) (tk <- TukeyHSD(oats1, which = "V")) plot(tk) ## An alternative under R is to use package multcomp (which requires mvtnorm) ## This code is for multcomp >= 0.991-1 library(multcomp) ## next is slow: (tk <- confint(glht(immer.aov, linfct = mcp(Var = "Tukey")))) plot(tk) confint(glht(oats1, linfct = mcp(V = "Tukey"))) lmat <- matrix(c(0,-1,1,rep(0, 11), 0,0,-1,1, rep(0,10), 0,0,0,-1,1,rep(0,9)),,3, dimnames = list(NULL, c("0.2cwt-0.0cwt", "0.4cwt-0.2cwt", "0.6cwt-0.4cwt"))) confint(glht(oats1, linfct = mcp(N = t(lmat[2:5, ])), alternative = "greater")) plot(tk) # End of ch06 MASS/inst/scripts/ch16.R0000644000176000001440000001231211754561330014401 0ustar ripleyusers#-*- R -*- ## Script from Fourth Edition of `Modern Applied Statistics with S' # Chapter 16 Optimization and Mazimum Likelihood Estimation library(MASS) pdf(file="ch16.pdf", width=8, height=8, pointsize=9) options(width=65, digits=5) # 16.3 General optimization attach(geyser) truehist(waiting, xlim = c(35, 110), ymax = 0.04, h = 5) wait.dns <- density(waiting, n = 512, width = "SJ") lines(wait.dns, lty = 2) lmix2 <- deriv3( ~ -log(p*dnorm((x-u1)/s1)/s1 + (1-p)*dnorm((x-u2)/s2)/s2), c("p", "u1", "s1", "u2", "s2"), function(x, p, u1, s1, u2, s2) NULL) (p0 <- c(p = mean(waiting < 70), u1 = 50, s1 = 5, u2 = 80, s2 = 5)) ## using optim mix.obj <- function(p, x) { e <- p[1] * dnorm((x - p[2])/p[3])/p[3] + (1 - p[1]) * dnorm((x - p[4])/p[5])/p[5] if(any(e <= 0)) Inf else -sum(log(e)) } optim(p0, mix.obj, x = waiting)$par # Nelder-Mead optim(p0, mix.obj, x = waiting, method = "BFGS", control = list(parscale= c(0.1, rep(1, 4))))$par # with derivatives lmix2a <- deriv( ~ -log(p*dnorm((x-u1)/s1)/s1 + (1-p)*dnorm((x-u2)/s2)/s2), c("p", "u1", "s1", "u2", "s2"), function(x, p, u1, s1, u2, s2) NULL) mix.gr <- function(p, x) { u1 <- p[2]; s1 <- p[3]; u2 <- p[4]; s2 <- p[5]; p <- p[1] colSums(attr(lmix2a(x, p, u1, s1, u2, s2), "gradient")) } optim(p0, mix.obj, 
mix.gr, x = waiting, method = "BFGS", control = list(parscale= c(0.1, rep(1, 4))))$par mix.nl0 <- optim(p0, mix.obj, mix.gr, method = "L-BFGS-B", hessian = TRUE, lower = c(0, -Inf, 0, -Inf, 0), upper = c(1, rep(Inf, 4)), x = waiting) rbind(est = mix.nl0$par, se = sqrt(diag(solve(mix.nl0$hessian)))) dmix2 <- function(x, p, u1, s1, u2, s2) p * dnorm(x, u1, s1) + (1-p) * dnorm(x, u2, s2) attach(as.list(mix.nl0$par)) wait.fdns <- list(x = wait.dns$x, y = dmix2(wait.dns$x, p, u1, s1, u2, s2)) lines(wait.fdns) par(usr = c(0, 1, 0, 1)) legend(0.1, 0.9, c("Normal mixture", "Nonparametric"), lty = c(1, 2), bty = "n") pmix2 <- deriv(~ p*pnorm((x-u1)/s1) + (1-p)*pnorm((x-u2)/s2), "x", function(x, p, u1, s1, u2, s2) {}) pr0 <- (seq(along = waiting) - 0.5)/length(waiting) x0 <- x1 <- as.vector(sort(waiting)) ; del <- 1; i <- 0 while((i <- 1 + 1) < 10 && abs(del) > 0.0005) { pr <- pmix2(x0, p, u1, s1, u2, s2) del <- (pr - pr0)/attr(pr, "gradient") x0 <- x0 - 0.5*del cat(format(del <- max(abs(del))), "\n") } detach() par(pty = "s") plot(x0, x1, xlim = range(x0, x1), ylim = range(x0, x1), xlab = "Model quantiles", ylab = "Waiting time") abline(0, 1) par(pty = "m") mix1.obj <- function(p, x, y) { q <- exp(p[1] + p[2]*y) q <- q/(1 + q) e <- q * dnorm((x - p[3])/p[4])/p[4] + (1 - q) * dnorm((x - p[5])/p[6])/p[6] if(any(e <= 0)) Inf else -sum(log(e)) } p1 <- mix.nl0$par; tmp <- as.vector(p1[1]) p2 <- c(a = log(tmp/(1-tmp)), b = 0, p1[-1]) mix.nl1 <- optim(p2, mix1.obj, method = "L-BFGS-B", lower = c(-Inf, -Inf, -Inf, 0, -Inf, 0), upper = rep(Inf, 6), hessian = TRUE, x = waiting[-1], y = duration[-299]) rbind(est = mix.nl1$par, se = sqrt(diag(solve(mix.nl1$hessian)))) if(!exists("bwt")) { attach(birthwt) race <- factor(race, labels=c("white", "black", "other")) ptd <- factor(ptl > 0) ftv <- factor(ftv); levels(ftv)[-(1:2)] <- "2+" bwt <- data.frame(low=factor(low), age, lwt, race, smoke=(smoke>0), ptd, ht=(ht>0), ui=(ui>0), ftv) detach(); rm(race, ptd, ftv) } logitreg <- function(x, y, 
wt = rep(1, length(y)), intercept = TRUE, start = rep(0, p), ...) { fmin <- function(beta, X, y, w) { p <- plogis(X %*% beta) -sum(2 * w * ifelse(y, log(p), log(1-p))) } gmin <- function(beta, X, y, w) { eta <- X %*% beta; p <- plogis(eta) -2 * matrix(w *dlogis(eta) * ifelse(y, 1/p, -1/(1-p)), 1) %*% X } if(is.null(dim(x))) dim(x) <- c(length(x), 1) dn <- dimnames(x)[[2]] if(!length(dn)) dn <- paste("Var", 1:ncol(x), sep="") p <- ncol(x) + intercept if(intercept) {x <- cbind(1, x); dn <- c("(Intercept)", dn)} if(is.factor(y)) y <- (unclass(y) != 1) fit <- optim(start, fmin, gmin, X = x, y = y, w = wt, method = "BFGS", ...) names(fit$par) <- dn cat("\nCoefficients:\n"); print(fit$par) # R: use fit$value and fit$convergence cat("\nResidual Deviance:", format(fit$value), "\n") if(fit$convergence > 0) cat("\nConvergence code:", fit$convergence, "\n") invisible(fit) } options(contrasts = c("contr.treatment", "contr.poly")) X <- model.matrix(terms(low ~ ., data=bwt), data = bwt)[, -1] logitreg(X, bwt$low) AIDSfit <- function(y, z, start=rep(mean(y), ncol(z)), ...) { deviance <- function(beta, y, z) { mu <- z %*% beta 2 * sum(mu - y - y*log(mu/y)) } grad <- function(beta, y, z) { mu <- z %*% beta 2 * t(1 - y/mu) %*% z } optim(start, deviance, grad, lower = 0, y = y, z = z, method = "L-BFGS-B", ...) 
} Y <- scan() 12 14 33 50 67 74 123 141 165 204 253 246 240 library(nnet) # for class.ind s <- seq(0, 13.999, 0.01); tint <- 1:14 X <- expand.grid(s, tint) Z <- matrix(pweibull(pmax(X[,2] - X[,1],0), 2.5, 10),length(s)) Z <- Z[,2:14] - Z[,1:13] Z <- t(Z) %*% class.ind(factor(floor(s/2))) * 0.01 round(AIDSfit(Y, Z)$par) rm(s, X, Y, Z) # End of ch16 MASS/inst/scripts/ch05.R0000644000176000001440000001572112072770502014403 0ustar ripleyusers#-*- R -*- ## Script from Fourth Edition of `Modern Applied Statistics with S' # Chapter 5 Univariate Statistics # for later use, from section 5.6 perm.t.test <- function(d) { # ttest is function(x) mean(x)/sqrt(var(x)/length(x)) binary.v <- function(x, digits) { if(missing(digits)) { mx <- max(x) digits <- if(mx > 0) 1 + floor(log(mx, base = 2)) else 1 } ans <- 0:(digits - 1) lx <- length(x) x <- rep(x, rep(digits, lx)) x <- (x %/% 2^ans) %% 2 dim(x) <- c(digits, lx) x } digits <- length(d) n <- 2^digits x <- d * 2 * (binary.v(1:n, digits) - 0.5) mx <- matrix(1/digits, 1, digits) %*% x s <- matrix(1/(digits - 1), 1, digits) vx <- s %*% (x - matrix(mx, digits, n, byrow=TRUE))^2 as.vector(mx/sqrt(vx/digits)) } library(MASS) options(width=65, digits=5, height=9999) library(lattice) pdf(file="ch05.pdf", width=8, height=6, pointsize=9) rm(A, B) # precautionary clear-out attach(shoes) tperm <- perm.t.test(B - A) # see section 5.6 detach() # from ch04 if(!exists("fgl.df")) { fgl0 <- fgl[ ,-10] # omit type. 
fgl.df <- data.frame(type = rep(fgl$type, 9), y = as.vector(as.matrix(fgl0)), meas = factor(rep(1:9, each = 214), labels = names(fgl0))) invisible() } # 5.1 Probability distributions x <- rt(250, df = 9) par(pty = "s") qqnorm(x) qqline(x) par(pty = "m") x <- rgamma(100, shape = 5, rate = 0.1) fitdistr(x, "gamma") x2 <- rt(250, df = 9) fitdistr(x2, "t", df = 9) fitdistr(x2, "t") # 5.2 Generating random data contam <- rnorm( 100, 0, (1 + 2*rbinom(100, 1, 0.05)) ) # 5.3 Data summaries par(mfrow=c(2,3)) hist(geyser$duration, "scott", xlab="duration") hist(chem, "scott") hist(tperm, "scott") hist(geyser$duration, "FD", xlab="duration") hist(chem, "FD") hist(tperm, "FD") par(mfrow=c(1,1)) swiss.fertility <- swiss[, 1] stem(swiss.fertility) stem(chem) stem(abbey) stem(abbey, scale = 0.4) ## use scale = 0.4 in R par(mfrow = c(1,2)) boxplot(chem, sub = "chem", range = 0.5) boxplot(abbey, sub = "abbey") par(mfrow = c(1,1)) bwplot(type ~ y | meas, data = fgl.df, scales = list(x="free"), strip = function(...) 
strip.default(..., style=1), xlab = "") # 5.4 Classical univariate statistics attach(shoes) t.test(A, mu = 10) t.test(A)$conf.int wilcox.test(A, mu = 10) var.test(A, B) t.test(A, B, var.equal = TRUE) t.test(A, B, var.equal = FALSE) wilcox.test(A, B) t.test(A, B, paired = TRUE) wilcox.test(A, B, paired = TRUE) detach() par(mfrow = c(1, 2)) truehist(tperm, xlab = "diff") x <- seq(-4,4, 0.1) lines(x, dt(x,9)) #cdf.compare(tperm, distribution = "t", df = 9) sres <- c(sort(tperm), 4) yres <- (0:1024)/1024 plot(sres, yres, type="S", xlab="diff", ylab="") lines(x, pt(x,9), lty=3) legend(-5, 1.05, c("Permutation dsn","t_9 cdf"), lty = c(1,3)) par(mfrow = c(1, 1)) # 5.5 Robust summaries # Figure 5.7 was obtained by x <- seq(-10, 10, len=500) y <- dt(x, 25, log = TRUE) z <- -diff(y)/diff(x) plot(x[-1], z, type = "l", xlab = "", ylab = "psi") y2 <- dt(x, 5, log = TRUE) z2 <- -diff(y2)/diff(x) lines(x[-1], z2, lty = 2) sort(chem) mean(chem) median(chem) #location.m(chem) #location.m(chem, psi.fun="huber") mad(chem) #scale.tau(chem) #scale.tau(chem, center=3.68) unlist(huber(chem)) unlist(hubers(chem)) fitdistr(chem, "t", list(m = 3, s = 0.5), df = 5) sort(abbey) mean(abbey) median(abbey) #location.m(abbey) #location.m(abbey, psi.fun="huber") unlist(hubers(abbey)) unlist(hubers(abbey, k = 2)) unlist(hubers(abbey, k = 1)) fitdistr(abbey, "t", list(m = 12, s = 5), df = 10) # 5.6 Density estimation # Figure 5.8 attach(geyser) par(mfrow=c(2,3)) truehist(duration, h=0.5, x0=0.0, xlim=c(0, 6), ymax=0.7) truehist(duration, h=0.5, x0=0.1, xlim=c(0, 6), ymax=0.7) truehist(duration, h=0.5, x0=0.2, xlim=c(0, 6), ymax=0.7) truehist(duration, h=0.5, x0=0.3, xlim=c(0, 6), ymax=0.7) truehist(duration, h=0.5, x0=0.4, xlim=c(0, 6), ymax=0.7) breaks <- seq(0, 5.9, 0.1) counts <- numeric(length(breaks)) for(i in (0:4)) counts[i+(1:55)] <- counts[i+(1:55)] + rep(hist(duration, breaks=0.1*i + seq(0, 5.5, 0.5), prob=TRUE, plot=FALSE)$density, rep(5,11)) plot(breaks+0.05, counts/5, type="l", 
xlab="duration", ylab="averaged", bty="n", xlim=c(0, 6), ylim=c(0, 0.7)) detach() attach(geyser) truehist(duration, nbins = 15, xlim = c(0.5, 6), ymax = 1.2) lines(density(duration, width = "nrd")) truehist(duration, nbins = 15, xlim = c(0.5, 6), ymax = 1.2) lines(density(duration, width = "SJ", n = 256), lty = 3) lines(density(duration, n = 256, width = "SJ-dpi"), lty = 1) detach() gal <- galaxies/1000 plot(x = c(0, 40), y = c(0, 0.3), type = "n", bty = "l", xlab = "velocity of galaxy (1000km/s)", ylab = "density") rug(gal) lines(density(gal, width = "SJ-dpi", n = 256), lty = 1) lines(density(gal, width = "SJ", n = 256), lty = 3) library(polspline) x <- seq(5, 40, length = 500) lines(x, doldlogspline(x, oldlogspline(gal)), lty = 2) geyser2 <- data.frame(as.data.frame(geyser)[-1, ], pduration = geyser$duration[-299]) attach(geyser2) par(mfrow = c(2, 2)) plot(pduration, waiting, xlim = c(0.5, 6), ylim = c(40, 110), xlab = "previous duration", ylab = "waiting") f1 <- kde2d(pduration, waiting, n = 50, lims=c(0.5, 6, 40, 110)) image(f1, zlim = c(0, 0.075), xlab = "previous duration", ylab = "waiting") f2 <- kde2d(pduration, waiting, n = 50, lims=c(0.5, 6, 40, 110), h = c(width.SJ(duration), width.SJ(waiting)) ) image(f2, zlim = c(0, 0.075), xlab = "previous duration", ylab = "waiting") persp(f2, phi = 30, theta = 20, d = 5, xlab = "previous duration", ylab = "waiting", zlab = "") detach() density(gal, n = 1, from = 20.833, to = 20.834, width = "SJ")$y 1/(2 * sqrt(length(gal)) * 0.13) set.seed(101) m <- 1000 res <- numeric(m) for (i in 1:m) res[i] <- median(sample(gal, replace = TRUE)) mean(res - median(gal)) sqrt(var(res)) truehist(res, h = 0.1) lines(density(res, width = "SJ-dpi", n = 256)) quantile(res, p = c(0.025, 0.975)) x <- seq(19.5, 22.5, length = 500) lines(x, doldlogspline(x, oldlogspline(res)), lty = 3) library(boot) set.seed(101) gal.boot <- boot(gal, function(x, i) median(x[i]), R = 1000) gal.boot boot.ci(gal.boot, conf = c(0.90, 0.95), type = 
c("norm","basic","perc","bca")) plot(gal.boot) if(FALSE) { # bootstrap() is an S-PLUS function gal.bt <- bootstrap(gal, median, seed = 101, B = 1000) summary(gal.bt) plot(gal.bt) qqnorm(gal.bt) limits.emp(gal.bt) limits.bca(gal.bt) } sim.gen <- function(data, mle) { n <- length(data) data[sample(n, replace = TRUE)] + mle*rnorm(n) } gal.boot2 <- boot(gal, median, R = 1000, sim = "parametric", ran.gen = sim.gen, mle = 0.5) boot.ci(gal.boot2, conf = c(0.90, 0.95), type = c("norm","basic","perc")) attach(shoes) t.test(B - A) shoes.boot <- boot(B - A, function(x,i) mean(x[i]), R = 1000) boot.ci(shoes.boot, type = c("norm", "basic", "perc", "bca")) mean.fun <- function(d, i) { n <- length(i) c(mean(d[i]), (n-1)*var(d[i])/n^2) } shoes.boot2 <- boot(B - A, mean.fun, R = 1000) boot.ci(shoes.boot2, type = "stud") detach() # End of ch05 MASS/inst/scripts/ch08.R0000644000176000001440000003421612171730445014410 0ustar ripleyusers#-*- R -*- ## Script from Fourth Edition of `Modern Applied Statistics with S' # Chapter 8 Non-linear and Smooth Regression library(MASS) library(lattice) options(width=65, digits=5, height=9999) pdf(file="ch08.pdf", width=8, height=6, pointsize=9) # From Chapter 6, for comparisons set.seed(123) cpus.samp <- c(3, 5, 6, 7, 8, 10, 11, 16, 20, 21, 22, 23, 24, 25, 29, 33, 39, 41, 44, 45, 46, 49, 57, 58, 62, 63, 65, 66, 68, 69, 73, 74, 75, 76, 78, 83, 86, 88, 98, 99, 100, 103, 107, 110, 112, 113, 115, 118, 119, 120, 122, 124, 125, 126, 127, 132, 136, 141, 144, 146, 147, 148, 149, 150, 151, 152, 154, 156, 157, 158, 159, 160, 161, 163, 166, 167, 169, 170, 173, 174, 175, 176, 177, 183, 184, 187, 188, 189, 194, 195, 196, 197, 198, 199, 202, 204, 205, 206, 208, 209) cpus1 <- cpus attach(cpus) for(v in names(cpus)[2:7]) cpus1[[v]] <- cut(cpus[[v]], unique(quantile(cpus[[v]])), include.lowest = TRUE) detach() cpus.lm <- lm(log10(perf) ~ ., data=cpus1[cpus.samp, 2:8]) cpus.lm2 <- stepAIC(cpus.lm, trace=FALSE) res2 <- log10(cpus1[-cpus.samp, "perf"]) - 
predict(cpus.lm2, cpus1[-cpus.samp,]) cpus2 <- cpus[, 2:8] # excludes names, authors' predictions cpus2[, 1:3] <- log10(cpus2[, 1:3]) test.cpus <- function(fit) sqrt(sum((log10(cpus2[-cpus.samp, "perf"]) - predict(fit, cpus2[-cpus.samp,]))^2)/109) # 8.1 An introductory example attach(wtloss) # alter margin 4; others are default oldpar <- par(mar = c(5.1, 4.1, 4.1, 4.1)) plot(Days, Weight, type = "p", ylab = "Weight (kg)") Wt.lbs <- pretty(range(Weight*2.205)) axis(side = 4, at = Wt.lbs/2.205, lab = Wt.lbs, srt = 90) mtext("Weight (lb)", side = 4, line = 3) par(oldpar) # restore settings detach() # 8.2 Fitting non-linear regression models wtloss.st <- c(b0 = 90, b1 = 95, th = 120) wtloss.fm <- nls(Weight ~ b0 + b1*2^(-Days/th), data = wtloss, start = wtloss.st, trace = TRUE) wtloss.fm expn <- function(b0, b1, th, x) { temp <- 2^(-x/th) model.func <- b0 + b1 * temp Z <- cbind(1, temp, (b1 * x * temp * log(2))/th^2) dimnames(Z) <- list(NULL, c("b0", "b1", "th")) attr(model.func, "gradient") <- Z model.func } wtloss.gr <- nls(Weight ~ expn(b0, b1, th, Days), data = wtloss, start = wtloss.st, trace = TRUE) expn1 <- deriv(y ~ b0 + b1 * 2^(-x/th), c("b0", "b1", "th"), function(b0, b1, th, x) {}) negexp <- selfStart(model = ~ b0 + b1*exp(-x/th), initial = negexp.SSival, parameters = c("b0", "b1", "th"), template = function(x, b0, b1, th) {}) wtloss.ss <- nls(Weight ~ negexp(Days, B0, B1, theta), data = wtloss, trace = TRUE) # 8.3 Non-linear fitted model objects and method functions summary(wtloss.gr) deviance(wtloss.gr) vcov(wtloss.gr) A <- model.matrix(~ Strip - 1, data = muscle) rats.nls1 <- nls(log(Length) ~ cbind(A, rho^Conc), data = muscle, start = c(rho = 0.1), algorithm = "plinear") (B <- coef(rats.nls1)) st <- list(alpha = B[2:22], beta = B[23], rho = B[1]) rats.nls2 <- nls(log(Length) ~ alpha[Strip] + beta*rho^Conc, data = muscle, start = st) attach(muscle) Muscle <- expand.grid(Conc = sort(unique(Conc)), Strip = levels(Strip)) Muscle$Yhat <- predict(rats.nls2, 
Muscle) Muscle$logLength <- rep(NA, nrow(Muscle)) ind <- match(paste(Strip, Conc), paste(Muscle$Strip, Muscle$Conc)) Muscle$logLength[ind] <- log(Length) detach() xyplot(Yhat ~ Conc | Strip, Muscle, as.table = TRUE, ylim = range(c(Muscle$Yhat, Muscle$logLength), na.rm = TRUE), subscripts = TRUE, xlab = "Calcium Chloride concentration (mM)", ylab = "log(Length in mm)", panel = function(x, y, subscripts, ...) { panel.xyplot(x, Muscle$logLength[subscripts], ...) llines(spline(x, y)) }) # 8.5 Confidence intervals for parameters expn2 <- deriv(~b0 + b1*((w0 - b0)/b1)^(x/d0), c("b0","b1","d0"), function(b0, b1, d0, x, w0) {}) wtloss.init <- function(obj, w0) { p <- coef(obj) d0 <- - log((w0 - p["b0"])/p["b1"])/log(2) * p["th"] c(p[c("b0", "b1")], d0 = as.vector(d0)) } out <- NULL w0s <- c(110, 100, 90) for(w0 in w0s) { fm <- nls(Weight ~ expn2(b0, b1, d0, Days, w0), wtloss, start = wtloss.init(wtloss.gr, w0)) out <- rbind(out, c(coef(fm)["d0"], confint(fm, "d0"))) } dimnames(out)[[1]] <- paste(w0s,"kg:") out fm0 <- lm(Wt*Time ~ Viscosity + Time - 1, data = stormer) b0 <- coef(fm0) names(b0) <- c("b1", "b2") b0 storm.fm <- nls(Time ~ b1*Viscosity/(Wt-b2), data = stormer, start = b0, trace = TRUE) bc <- coef(storm.fm) se <- sqrt(diag(vcov(storm.fm))) dv <- deviance(storm.fm) par(pty = "s") b1 <- bc[1] + seq(-3*se[1], 3*se[1], length = 51) b2 <- bc[2] + seq(-3*se[2], 3*se[2], length = 51) bv <- expand.grid(b1, b2) attach(stormer) ssq <- function(b) sum((Time - b[1] * Viscosity/(Wt-b[2]))^2) dbetas <- apply(bv, 1, ssq) cc <- matrix(Time - rep(bv[,1],rep(23, 2601)) * Viscosity/(Wt - rep(bv[,2], rep(23, 2601))), 23) dbetas <- matrix(drop(rep(1, 23) %*% cc^2), 51) fstat <- matrix( ((dbetas - dv)/2) / (dv/21), 51, 51) qf(0.95, 2, 21) plot(b1, b2, type = "n") lev <- c(1, 2, 5, 7, 10, 15, 20) contour(b1, b2, fstat, levels = lev, labex = 0.75, lty = 2, add = TRUE) contour(b1, b2, fstat, levels = qf(0.95,2,21), add = TRUE, labex = 0) text(31.6, 0.3, labels = "95% CR", adj = 0, cex = 
0.75) points(bc[1], bc[2], pch = 3, mkh = 0.1) detach() par(pty = "m") library(boot) storm.fm <- nls(Time ~ b*Viscosity/(Wt - c), stormer, start = c(b=29.401, c=2.2183)) summary(storm.fm)$parameters st <- cbind(stormer, fit=fitted(storm.fm)) storm.bf <- function(rs, i) { # st <- st # for S-PLUS st$Time <- st$fit + rs[i] coef(nls(Time ~ b * Viscosity/(Wt - c), st, start = coef(storm.fm))) } rs <- scale(resid(storm.fm), scale = FALSE) # remove the mean (storm.boot <- boot(rs, storm.bf, R = 9999)) ## slow boot.ci(storm.boot, index = 1, type = c("norm", "basic", "perc", "bca")) boot.ci(storm.boot, index = 2, type = c("norm", "basic", "perc", "bca")) # 8.5 Assessing the linear approximation opar <- par(pty = "m", mfrow = c(1, 3)) plot(profile(update(wtloss.gr, trace = FALSE))) par(opar) # 8.7 One-dimensional curve fitting attach(GAGurine) par(mfrow = c(3, 2)) plot(Age, GAG, main = "Degree 6 polynomial") GAG.lm <- lm(GAG ~ Age + I(Age^2) + I(Age^3) + I(Age^4) + I(Age^5) + I(Age^6) + I(Age^7) + I(Age^8)) anova(GAG.lm) GAG.lm2 <- lm(GAG ~ Age + I(Age^2) + I(Age^3) + I(Age^4) + I(Age^5) + I(Age^6)) xx <- seq(0, 17, len = 200) lines(xx, predict(GAG.lm2, data.frame(Age = xx))) library(splines) plot(Age, GAG, type = "n", main = "Splines") lines(Age, fitted(lm(GAG ~ ns(Age, df = 5)))) lines(Age, fitted(lm(GAG ~ ns(Age, df = 10))), lty = 3) lines(Age, fitted(lm(GAG ~ ns(Age, df = 20))), lty = 4) lines(smooth.spline(Age, GAG), lwd = 3) legend(12, 50, c("df=5", "df=10", "df=20", "Smoothing"), lty = c(1, 3, 4, 1), lwd = c(1,1,1,3), bty = "n") plot(Age, GAG, type = "n", main = "loess") lines(loess.smooth(Age, GAG)) plot(Age, GAG, type = "n", main = "supsmu") lines(supsmu(Age, GAG)) lines(supsmu(Age, GAG, bass = 3), lty = 3) lines(supsmu(Age, GAG, bass = 10), lty = 4) legend(12, 50, c("default", "base = 3", "base = 10"), lty = c(1, 3, 4), bty = "n") plot(Age, GAG, type = "n", main = "ksmooth") lines(ksmooth(Age, GAG, "normal", bandwidth = 1)) lines(ksmooth(Age, GAG, "normal", 
bandwidth = 5), lty = 3) legend(12, 50, c("width = 1", "width = 5"), lty = c(1, 3), bty = "n") library(KernSmooth) plot(Age, GAG, type = "n", main = "locpoly") (h <- dpill(Age, GAG)) lines(locpoly(Age, GAG, degree = 0, bandwidth = h)) lines(locpoly(Age, GAG, degree = 1, bandwidth = h), lty = 3) lines(locpoly(Age, GAG, degree = 2, bandwidth = h), lty = 4) legend(12, 50, c("const", "linear", "quadratic"), lty = c(1, 3, 4), bty = "n") detach() # 8.8 Additive models ## R has a different gam() in package mgcv library(mgcv) rock.lm <- lm(log(perm) ~ area + peri + shape, data = rock) summary(rock.lm) (rock.gam <- gam(log(perm) ~ s(area) + s(peri) + s(shape), data=rock)) #summary(rock.gam) #anova(rock.lm, rock.gam) par(mfrow = c(2, 3), pty = "s") plot(rock.gam, se = TRUE, pages = 0) rock.gam1 <- gam(log(perm) ~ area + peri + s(shape), data = rock) plot(rock.gam1, se = TRUE) par(pty="m") #anova(rock.lm, rock.gam1, rock.gam) library(mda) rock.bruto <- bruto(rock[, -4], rock[, 4]) rock.bruto$type rock.bruto$df Xin <- as.matrix(cpus2[cpus.samp, 1:6]) test2 <- function(fit) { Xp <- as.matrix(cpus2[-cpus.samp, 1:6]) sqrt(sum((log10(cpus2[-cpus.samp, "perf"]) - predict(fit, Xp))^2)/109) } cpus.bruto <- bruto(Xin, log10(cpus2[cpus.samp, 7])) test2(cpus.bruto) cpus.bruto$type cpus.bruto$df # examine the fitted functions par(mfrow = c(3, 2)) Xp <- matrix(sapply(cpus2[cpus.samp, 1:6], mean), 100, 6, byrow = TRUE) for(i in 1:6) { xr <- sapply(cpus2, range) Xp1 <- Xp; Xp1[, i] <- seq(xr[1, i], xr[2, i], len = 100) Xf <- predict(cpus.bruto, Xp1) plot(Xp1[ ,i], Xf, xlab=names(cpus2)[i], ylab= "", type = "l") } cpus.mars <- mars(Xin, log10(cpus2[cpus.samp,7])) showcuts <- function(obj) { tmp <- obj$cuts[obj$sel, ] dimnames(tmp) <- list(NULL, dimnames(Xin)[[2]]) tmp } showcuts(cpus.mars) test2(cpus.mars) # examine the fitted functions Xp <- matrix(sapply(cpus2[cpus.samp, 1:6], mean), 100, 6, byrow = TRUE) for(i in 1:6) { xr <- sapply(cpus2, range) Xp1 <- Xp; Xp1[, i] <- seq(xr[1, i], xr[2, 
i], len = 100) Xf <- predict(cpus.mars, Xp1) plot(Xp1[ ,i], Xf, xlab = names(cpus2)[i], ylab = "", type = "l") } cpus.mars2 <- mars(Xin, log10(cpus2[cpus.samp,7]), degree = 2) showcuts(cpus.mars2) test2(cpus.mars2) cpus.mars6 <- mars(Xin, log10(cpus2[cpus.samp,7]), degree = 6) showcuts(cpus.mars6) test2(cpus.mars6) if(require(acepack)) { attach(cpus2) cpus.avas <- avas(cpus2[, 1:6], perf) plot(log10(perf), cpus.avas$ty) par(mfrow = c(2, 3)) for(i in 1:6) { o <- order(cpus2[, i]) plot(cpus2[o, i], cpus.avas$tx[o, i], type = "l", xlab = names(cpus2[i]), ylab = "") } detach() } # 8.9 Projection-pursuit regression attach(rock) rock1 <- data.frame(area = area/10000, peri = peri/10000, shape = shape, perm = perm) detach() (rock.ppr <- ppr(log(perm) ~ area + peri + shape, data = rock1, nterms = 2, max.terms = 5)) rock.ppr summary(rock.ppr) par(mfrow = c(3, 2)) plot(rock.ppr) plot(update(rock.ppr, bass = 5)) plot(rock.ppr2 <- update(rock.ppr, sm.method = "gcv", gcvpen = 2)) par(mfrow = c(1, 1)) summary(rock.ppr2) summary(rock1) # to find the ranges of the variables Xp <- expand.grid(area = seq(0.1, 1.2, 0.05), peri = seq(0, 0.5, 0.02), shape = 0.2) rock.grid <- cbind(Xp, fit = predict(rock.ppr2, Xp)) wireframe(fit ~ area + peri, rock.grid, screen = list(z=160,x=-60), aspect = c(1, 0.5), drape = TRUE) # or persp(seq(0.1, 1.2, 0.05), seq(0, 0.5, 0.02), matrix(rock.grid$fit, 23), d = 5, theta = -160, phi = 30, zlim = c(-1, 15)) (cpus.ppr <- ppr(log10(perf) ~ ., data = cpus2[cpus.samp,], nterms = 2, max.terms = 10, bass = 5)) cpus.ppr <- ppr(log10(perf) ~ ., data = cpus2[cpus.samp,], nterms = 8, max.terms = 10, bass = 5) test.cpus(cpus.ppr) ppr(log10(perf) ~ ., data = cpus2[cpus.samp,], nterms = 2, max.terms = 10, sm.method = "spline") cpus.ppr2 <- ppr(log10(perf) ~ ., data = cpus2[cpus.samp,], nterms = 7, max.terms = 10, sm.method = "spline") test.cpus(cpus.ppr2) res3 <- log10(cpus2[-cpus.samp, "perf"]) - predict(cpus.ppr, cpus2[-cpus.samp,]) wilcox.test(res2^2, res3^2, 
paired = TRUE, alternative = "greater") # 8.10 Neural networks library(nnet) attach(rock) area1 <- area/10000; peri1 <- peri/10000 rock1 <- data.frame(perm, area = area1, peri = peri1, shape) rock.nn <- nnet(log(perm) ~ area + peri + shape, rock1, size = 3, decay = 1e-3, linout = TRUE, skip = TRUE, maxit = 1000, Hess = TRUE) sum((log(perm) - predict(rock.nn))^2) detach() eigen(rock.nn$Hessian, TRUE)$values # rock.nn$Hessian in R Xp <- expand.grid(area = seq(0.1, 1.2, 0.05), peri = seq(0, 0.5, 0.02), shape = 0.2) rock.grid <- cbind(Xp, fit = predict(rock.nn, Xp)) wireframe(fit ~ area + peri, rock.grid, screen = list(z=160, x=-60), aspect = c(1, 0.5), drape = TRUE) # or persp(seq(0.1, 1.2, 0.05), seq(0, 0.5, 0.02), matrix(rock.grid$fit, 23), d = 5, theta = -160, phi = 30, zlim = c(-1, 15)) attach(cpus2) cpus3 <- data.frame(syct= syct-2, mmin=mmin-3, mmax=mmax-4, cach=cach/256, chmin=chmin/100, chmax=chmax/100, perf=perf) detach() test.cpus <- function(fit) sqrt(sum((log10(cpus3[-cpus.samp, "perf"]) - predict(fit, cpus3[-cpus.samp,]))^2)/109) cpus.nn1 <- nnet(log10(perf) ~ ., cpus3[cpus.samp,], linout = TRUE, skip = TRUE, size = 0) test.cpus(cpus.nn1) cpus.nn2 <- nnet(log10(perf) ~ ., cpus3[cpus.samp,], linout = TRUE, skip = TRUE, size = 4, decay = 0.01, maxit = 1000) test.cpus(cpus.nn2) cpus.nn3 <- nnet(log10(perf) ~ ., cpus3[cpus.samp,], linout = TRUE, skip = TRUE, size = 10, decay = 0.01, maxit = 1000) test.cpus(cpus.nn3) cpus.nn4 <- nnet(log10(perf) ~ ., cpus3[cpus.samp,], linout = TRUE, skip = TRUE, size = 25, decay = 0.01, maxit = 1000) test.cpus(cpus.nn4) CVnn.cpus <- function(formula, data = cpus3[cpus.samp, ], size = c(0, 4, 4, 10, 10), lambda = c(0, rep(c(0.003, 0.01), 2)), nreps = 5, nifold = 10, ...) { CVnn1 <- function(formula, data, nreps=1, ri, ...) { truth <- log10(data$perf) res <- numeric(length(truth)) cat(" fold") for (i in sort(unique(ri))) { cat(" ", i, sep="") for(rep in 1:nreps) { learn <- nnet(formula, data[ri !=i,], trace=FALSE, ...) 
res[ri == i] <- res[ri == i] + predict(learn, data[ri == i,]) } } cat("\n") sum((truth - res/nreps)^2) } choice <- numeric(length(lambda)) ri <- sample(nifold, nrow(data), replace = TRUE) for(j in seq(along=lambda)) { cat(" size =", size[j], "decay =", lambda[j], "\n") choice[j] <- CVnn1(formula, data, nreps=nreps, ri=ri, size=size[j], decay=lambda[j], ...) } cbind(size=size, decay=lambda, fit=sqrt(choice/100)) } CVnn.cpus(log10(perf) ~ ., data = cpus3[cpus.samp,], linout = TRUE, skip = TRUE, maxit = 1000) # End of ch08 MASS/inst/scripts/ch12.R0000644000176000001440000003434611754561330014410 0ustar ripleyusers#-*- R -*- ## Script from Fourth Edition of `Modern Applied Statistics with S' # Chapter 12 Classification library(MASS) pdf(file="ch12.pdf", width=8, height=6, pointsize=9) options(width=65, digits=5) library(class) library(nnet) # 12.1 Discriminant Analysis ir <- rbind(iris3[,,1], iris3[,,2], iris3[,,3]) ir.species <- factor(c(rep("s", 50), rep("c", 50), rep("v", 50))) (ir.lda <- lda(log(ir), ir.species)) ir.ld <- predict(ir.lda, dimen = 2)$x eqscplot(ir.ld, type = "n", xlab = "first linear discriminant", ylab = "second linear discriminant") text(ir.ld, labels = as.character(ir.species[-143]), col = 3 + unclass(ir.species), cex = 0.8) plot(ir.lda, dimen = 1) plot(ir.lda, type = "density", dimen = 1) lcrabs <- log(crabs[, 4:8]) crabs.grp <- factor(c("B", "b", "O", "o")[rep(1:4, each = 50)]) (dcrabs.lda <- lda(crabs$sex ~ FL + RW + CL + CW, lcrabs)) table(crabs$sex, predict(dcrabs.lda)$class) (dcrabs.lda4 <- lda(crabs.grp ~ FL + RW + CL + CW, lcrabs)) dcrabs.pr4 <- predict(dcrabs.lda4, dimen = 2) dcrabs.pr2 <- dcrabs.pr4$post[, c("B", "O")] %*% c(1, 1) table(crabs$sex, dcrabs.pr2 > 0.5) cr.t <- dcrabs.pr4$x[, 1:2] eqscplot(cr.t, type = "n", xlab = "First LD", ylab = "Second LD") text(cr.t, labels = as.character(crabs.grp)) perp <- function(x, y) { m <- (x+y)/2 s <- - (x[1] - y[1])/(x[2] - y[2]) abline(c(m[2] - s*m[1], s)) invisible() } cr.m <- lda(cr.t, 
crabs$sex)$means points(cr.m, pch = 3, mkh = 0.3) perp(cr.m[1, ], cr.m[2, ]) cr.lda <- lda(cr.t, crabs.grp) x <- seq(-6, 6, 0.25) y <- seq(-2, 2, 0.25) Xcon <- matrix(c(rep(x,length(y)), rep(y, rep(length(x), length(y)))),,2) cr.pr <- predict(cr.lda, Xcon)$post[, c("B", "O")] %*% c(1,1) contour(x, y, matrix(cr.pr, length(x), length(y)), levels = 0.5, labex = 0, add = TRUE, lty= 3) for(i in c("O", "o", "B", "b")) print(var(lcrabs[crabs.grp == i, ])) fgl.ld <- predict(lda(type ~ ., fgl), dimen = 2)$x eqscplot(fgl.ld, type = "n", xlab = "LD1", ylab = "LD2") # either # for(i in seq(along = levels(fgl$type))) { # set <- fgl$type[-40] == levels(fgl$type)[i] # points(fgl.ld[set,], pch = 18, cex = 0.6, col = 2 + i)} # key(text = list(levels(fgl$type), col = 3:8)) # or text(fgl.ld, cex = 0.6, labels = c("F", "N", "V", "C", "T", "H")[fgl$type[-40]]) fgl.rld <- predict(lda(type ~ ., fgl, method = "t"), dimen = 2)$x eqscplot(fgl.rld, type = "n", xlab = "LD1", ylab = "LD2") # either # for(i in seq(along = levels(fgl$type))) { # set <- fgl$type[-40] == levels(fgl$type)[i] # points(fgl.rld[set,], pch = 18, cex = 0.6, col = 2 + i)} # key(text = list(levels(fgl$type), col = 3:8)) # or text(fgl.rld, cex = 0.6, labels = c("F", "N", "V", "C", "T", "H")[fgl$type[-40]]) # 12.2 Classification theory #decrease len if you have little memory. predplot <- function(object, main="", len = 100, ...) 
{ plot(Cushings[,1], Cushings[,2], log="xy", type="n", xlab = "Tetrahydrocortisone", ylab = "Pregnanetriol", main = main) for(il in 1:4) { set <- Cushings$Type==levels(Cushings$Type)[il] text(Cushings[set, 1], Cushings[set, 2], labels=as.character(Cushings$Type[set]), col = 2 + il) } xp <- seq(0.6, 4.0, length=len) yp <- seq(-3.25, 2.45, length=len) cushT <- expand.grid(Tetrahydrocortisone = xp, Pregnanetriol = yp) Z <- predict(object, cushT, ...); zp <- as.numeric(Z$class) zp <- Z$post[,3] - pmax(Z$post[,2], Z$post[,1]) contour(exp(xp), exp(yp), matrix(zp, len), add = TRUE, levels = 0, labex = 0) zp <- Z$post[,1] - pmax(Z$post[,2], Z$post[,3]) contour(exp(xp), exp(yp), matrix(zp, len), add = TRUE, levels = 0, labex = 0) invisible() } cushplot <- function(xp, yp, Z) { plot(Cushings[, 1], Cushings[, 2], log = "xy", type = "n", xlab = "Tetrahydrocortisone", ylab = "Pregnanetriol") for(il in 1:4) { set <- Cushings$Type==levels(Cushings$Type)[il] text(Cushings[set, 1], Cushings[set, 2], labels = as.character(Cushings$Type[set]), col = 2 + il) } zp <- Z[, 3] - pmax(Z[, 2], Z[, 1]) contour(exp(xp), exp(yp), matrix(zp, np), add = TRUE, levels = 0, labex = 0) zp <- Z[, 1] - pmax(Z[, 2], Z[, 3]) contour(exp(xp), exp(yp), matrix(zp, np), add = TRUE, levels = 0, labex = 0) invisible() } cush <- log(as.matrix(Cushings[, -3])) tp <- Cushings$Type[1:21, drop = TRUE] cush.lda <- lda(cush[1:21,], tp); predplot(cush.lda, "LDA") cush.qda <- qda(cush[1:21,], tp); predplot(cush.qda, "QDA") predplot(cush.qda, "QDA (predictive)", method = "predictive") predplot(cush.qda, "QDA (debiased)", method = "debiased") Cf <- data.frame(tp = tp, Tetrahydrocortisone = log(Cushings[1:21, 1]), Pregnanetriol = log(Cushings[1:21, 2]) ) cush.multinom <- multinom(tp ~ Tetrahydrocortisone + Pregnanetriol, Cf, maxit = 250) xp <- seq(0.6, 4.0, length = 100); np <- length(xp) yp <- seq(-3.25, 2.45, length = 100) cushT <- expand.grid(Tetrahydrocortisone = xp, Pregnanetriol = yp) Z <- predict(cush.multinom, 
cushT, type = "probs") cushplot(xp, yp, Z) library(tree) cush.tr <- tree(tp ~ Tetrahydrocortisone + Pregnanetriol, Cf) plot(cush[, 1], cush[, 2], type = "n", xlab = "Tetrahydrocortisone", ylab = "Pregnanetriol") for(il in 1:4) { set <- Cushings$Type==levels(Cushings$Type)[il] text(cush[set, 1], cush[set, 2], labels = as.character(Cushings$Type[set]), col = 2 + il) } par(cex = 1.5); partition.tree(cush.tr, add = TRUE); par(cex = 1) # 12.3 Non-parametric rules Z <- knn(scale(cush[1:21, ], FALSE, c(3.4, 5.7)), scale(cushT, FALSE, c(3.4, 5.7)), tp) cushplot(xp, yp, class.ind(Z)) Z <- knn(scale(cush[1:21, ], FALSE, c(3.4, 5.7)), scale(cushT, FALSE, c(3.4, 5.7)), tp, k = 3) cushplot(xp, yp, class.ind(Z)) # 12.4 Neural networks pltnn <- function(main, ...) { plot(Cushings[,1], Cushings[,2], log="xy", type="n", xlab="Tetrahydrocortisone", ylab = "Pregnanetriol", main=main, ...) for(il in 1:4) { set <- Cushings$Type==levels(Cushings$Type)[il] text(Cushings[set, 1], Cushings[set, 2], as.character(Cushings$Type[set]), col = 2 + il) } } plt.bndry <- function(size=0, decay=0, ...) { cush.nn <- nnet(cush, tpi, skip=TRUE, softmax=TRUE, size=size, decay=decay, maxit=1000) invisible(b1(predict(cush.nn, cushT), ...)) } b1 <- function(Z, ...) { zp <- Z[,3] - pmax(Z[,2], Z[,1]) contour(exp(xp), exp(yp), matrix(zp, np), add=TRUE, levels=0, labex=0, ...) zp <- Z[,1] - pmax(Z[,3], Z[,2]) contour(exp(xp), exp(yp), matrix(zp, np), add=TRUE, levels=0, labex=0, ...) 
} cush <- cush[1:21,]; tpi <- class.ind(tp) # functions pltnn and plt.bndry given in the scripts par(mfrow = c(2, 2)) pltnn("Size = 2") set.seed(1); plt.bndry(size = 2, col = 2) set.seed(3); plt.bndry(size = 2, col = 3) plt.bndry(size = 2, col = 4) pltnn("Size = 2, lambda = 0.001") set.seed(1); plt.bndry(size = 2, decay = 0.001, col = 2) set.seed(2); plt.bndry(size = 2, decay = 0.001, col = 4) pltnn("Size = 2, lambda = 0.01") set.seed(1); plt.bndry(size = 2, decay = 0.01, col = 2) set.seed(2); plt.bndry(size = 2, decay = 0.01, col = 4) pltnn("Size = 5, 20 lambda = 0.01") set.seed(2); plt.bndry(size = 5, decay = 0.01, col = 1) set.seed(2); plt.bndry(size = 20, decay = 0.01, col = 2) # functions pltnn and b1 are in the scripts pltnn("Many local maxima") Z <- matrix(0, nrow(cushT), ncol(tpi)) for(iter in 1:20) { set.seed(iter) cush.nn <- nnet(cush, tpi, skip = TRUE, softmax = TRUE, size = 3, decay = 0.01, maxit = 1000, trace = FALSE) Z <- Z + predict(cush.nn, cushT) cat("final value", format(round(cush.nn$value,3)), "\n") b1(predict(cush.nn, cushT), col = 2, lwd = 0.5) } pltnn("Averaged") b1(Z, lwd = 3) # 12.5 Support vector machines library(e1071) crabs.svm <- svm(crabs$sp ~ ., data = lcrabs, cost = 100, gamma = 1) table(true = crabs$sp, predicted = predict(crabs.svm, lcrabs)) svm(crabs$sp ~ ., data = lcrabs, cost = 100, gamma = 1, cross = 10) # 12.6 Forensic glass example set.seed(123) # dump random partition from S-PLUS rand <- c(9, 6, 7, 10, 8, 8, 2, 2, 10, 1, 5, 2, 3, 8, 6, 8, 2, 6, 4, 4, 6, 1, 3, 2, 5, 5, 5, 3, 1, 9, 10, 2, 8, 2, 1, 6, 2, 7, 7, 8, 4, 1, 9, 5, 5, 1, 4, 6, 8, 6, 5, 7, 9, 2, 1, 1, 10, 9, 7, 6, 4, 7, 4, 8, 9, 9, 1, 8, 9, 5, 3, 3, 4, 8, 8, 6, 6, 9, 3, 10, 3, 10, 6, 6, 5, 10, 10, 2, 10, 6, 1, 4, 7, 8, 9, 10, 7, 10, 8, 4, 6, 8, 9, 10, 1, 9, 10, 6, 8, 4, 10, 8, 2, 10, 2, 3, 10, 1, 5, 9, 4, 4, 8, 2, 7, 6, 4, 8, 10, 4, 8, 10, 6, 10, 4, 9, 4, 1, 6, 5, 3, 2, 4, 1, 3, 4, 8, 4, 3, 7, 2, 5, 4, 5, 10, 7, 4, 2, 6, 3, 2, 2, 8, 4, 10, 8, 10, 2, 10, 6, 5, 2, 3, 2, 
6, 2, 7, 7, 8, 9, 7, 10, 8, 6, 7, 9, 7, 10, 3, 2, 7, 5, 6, 1, 3, 9, 7, 7, 1, 8, 7, 8, 8, 8, 10, 4, 5, 9, 4, 6, 9, 6, 10, 2) con <- function(...) { print(tab <- table(...)) diag(tab) <- 0 cat("error rate = ", round(100*sum(tab)/length(list(...)[[1]]), 2), "%\n") invisible() } CVtest <- function(fitfn, predfn, ...) { res <- fgl$type for (i in sort(unique(rand))) { cat("fold ", i, "\n", sep = "") learn <- fitfn(rand != i, ...) res[rand == i] <- predfn(learn, rand == i) } res } res.multinom <- CVtest( function(x, ...) multinom(type ~ ., fgl[x, ], ...), function(obj, x) predict(obj, fgl[x, ], type = "class"), maxit = 1000, trace = FALSE) con(true = fgl$type, predicted = res.multinom) res.lda <- CVtest( function(x, ...) lda(type ~ ., fgl[x, ], ...), function(obj, x) predict(obj, fgl[x, ])$class ) con(true = fgl$type, predicted = res.lda) fgl0 <- fgl[ , -10] # drop type { res <- fgl$type for (i in sort(unique(rand))) { cat("fold ", i ,"\n", sep = "") sub <- rand == i res[sub] <- knn(fgl0[!sub, ], fgl0[sub, ], fgl$type[!sub], k = 1) } res } -> res.knn1 con(true = fgl$type, predicted = res.knn1) res.lb <- knn(fgl0, fgl0, fgl$type, k = 3, prob = TRUE, use.all = FALSE) table(attr(res.lb, "prob")) library(rpart) res.rpart <- CVtest( function(x, ...) { tr <- rpart(type ~ ., fgl[x,], ...) cp <- tr$cptable r <- cp[, 4] + cp[, 5] rmin <- min(seq(along = r)[cp[, 4] < min(r)]) cp0 <- cp[rmin, 1] cat("size chosen was", cp[rmin, 2] + 1, "\n") prune(tr, cp = 1.01*cp0) }, function(obj, x) predict(obj, fgl[x, ], type = "class"), cp = 0.001 ) con(true = fgl$type, predicted = res.rpart) fgl1 <- fgl fgl1[1:9] <- lapply(fgl[, 1:9], function(x) {r <- range(x); (x - r[1])/diff(r)}) CVnn2 <- function(formula, data, size = rep(6,2), lambda = c(0.001, 0.01), nreps = 1, nifold = 5, verbose = 99, ...) { CVnn1 <- function(formula, data, nreps=1, ri, verbose, ...) 
{ truth <- data[,deparse(formula[[2]])] res <- matrix(0, nrow(data), length(levels(truth))) if(verbose > 20) cat(" inner fold") for (i in sort(unique(ri))) { if(verbose > 20) cat(" ", i, sep="") for(rep in 1:nreps) { learn <- nnet(formula, data[ri !=i,], trace = FALSE, ...) res[ri == i,] <- res[ri == i,] + predict(learn, data[ri == i,]) } } if(verbose > 20) cat("\n") sum(as.numeric(truth) != max.col(res/nreps)) } truth <- data[,deparse(formula[[2]])] res <- matrix(0, nrow(data), length(levels(truth))) choice <- numeric(length(lambda)) for (i in sort(unique(rand))) { if(verbose > 0) cat("fold ", i,"\n", sep="") ri <- sample(nifold, sum(rand!=i), replace=TRUE) for(j in seq(along=lambda)) { if(verbose > 10) cat(" size =", size[j], "decay =", lambda[j], "\n") choice[j] <- CVnn1(formula, data[rand != i,], nreps=nreps, ri=ri, size=size[j], decay=lambda[j], verbose=verbose, ...) } decay <- lambda[which.is.max(-choice)] csize <- size[which.is.max(-choice)] if(verbose > 5) cat(" #errors:", choice, " ") # if(verbose > 1) cat("chosen size = ", csize, " decay = ", decay, "\n", sep="") for(rep in 1:nreps) { learn <- nnet(formula, data[rand != i,], trace=FALSE, size=csize, decay=decay, ...) res[rand == i,] <- res[rand == i,] + predict(learn, data[rand == i,]) } } factor(levels(truth)[max.col(res/nreps)], levels = levels(truth)) } if(FALSE) { # only run this if you have time to wait res.nn2 <- CVnn2(type ~ ., fgl1, skip = TRUE, maxit = 500, nreps = 10) con(true = fgl$type, predicted = res.nn2) } res.svm <- CVtest( function(x, ...) 
svm(type ~ ., fgl[x, ], ...), function(obj, x) predict(obj, fgl[x, ]), cost = 100, gamma = 1 ) con(true = fgl$type, predicted = res.svm) svm(type ~ ., data = fgl, cost = 100, gamma = 1, cross = 10) cd0 <- lvqinit(fgl0, fgl$type, prior = rep(1, 6)/6, k = 3) cd1 <- olvq1(fgl0, fgl$type, cd0) con(true = fgl$type, predicted = lvqtest(cd1, fgl0)) CV.lvq <- function() { res <- fgl$type for(i in sort(unique(rand))) { cat("doing fold", i, "\n") cd0 <- lvqinit(fgl0[rand != i,], fgl$type[rand != i], prior = rep(1, 6)/6, k = 3) cd1 <- olvq1(fgl0[rand != i,], fgl$type[rand != i], cd0) cd1 <- lvq3(fgl0[rand != i,], fgl$type[rand != i], cd1, niter = 10000) res[rand == i] <- lvqtest(cd1, fgl0[rand == i, ]) } res } con(true = fgl$type, predicted = CV.lvq()) # 12.7 Calibration plots CVprobs <- function(fitfn, predfn, ...) { res <- matrix(, 214, 6) for (i in sort(unique(rand))) { cat("fold ", i, "\n", sep = "") learn <- fitfn(rand != i, ...) res[rand == i, ] <- predfn(learn, rand == i) } res } probs.multinom <- CVprobs( function(x, ...) 
multinom(type ~ ., fgl[x, ], ...), function(obj, x) predict(obj, fgl[x, ], type = "probs"), maxit = 1000, trace = FALSE) probs.yes <- as.vector(class.ind(fgl$type)) probs <- as.vector(probs.multinom) par(pty = "s") plot(c(0, 1), c(0, 1), type = "n", xlab = "predicted probability", ylab = "", xaxs = "i", yaxs = "i", las = 1) rug(probs[probs.yes == 0], 0.02, side = 1, lwd = 0.5) rug(probs[probs.yes == 1], 0.02, side = 3, lwd = 0.5) abline(0, 1) newp <- seq(0, 1, length = 100) lines(newp, predict(loess(probs.yes ~ probs, span = 1), newp)) # End of ch12 MASS/inst/scripts/ch04.R0000644000176000001440000001761111754561330014405 0ustar ripleyusers#-*- R -*- ## Script from Fourth Edition of `Modern Applied Statistics with S' # Chapter 4 Graphical Output library(MASS) library(lattice) pdf(file="ch04.pdf", width=8, height=6, pointsize=9) options(width=65, digits=5) # 4.2 Basic plotting functions lung.deaths <- aggregate(ts.union(mdeaths, fdeaths), 1) barplot(t(lung.deaths), names = dimnames(lung.deaths)[[1]], main = "UK deaths from lung disease") if(interactive()) legend(locator(1), c("Males", "Females"), fill = c(2, 3)) loc <- barplot(t(lung.deaths), names = dimnames(lung.deaths)[[1]], angle = c(45, 135), density = 10, col = 1) total <- rowSums(lung.deaths) text(loc, total + par("cxy")[2], total, cex = 0.7, xpd = TRUE) # S: if(interactive()) brush(hills) topo.loess <- loess(z ~ x * y, topo, degree = 2, span = 0.25) topo.mar <- list(x = seq(0, 6.5, 0.2), y=seq(0, 6.5, 0.2)) topo.lo <- predict(topo.loess, expand.grid(topo.mar)) par(pty = "s") # square plot contour(topo.mar$x, topo.mar$y, topo.lo, xlab = "", ylab = "", levels = seq(700,1000,25), cex = 0.7) points(topo$x, topo$y) par(pty = "m") topo.lo1 <- cbind(expand.grid(x=topo.mar$x, y=topo.mar$y), z=as.vector(topo.lo)) contourplot(z ~ x * y, topo.lo1, aspect = 1, at = seq(700, 1000, 25), xlab = "", ylab = "", panel = function(x, y, subscripts, ...) { panel.levelplot(x, y, subscripts, ...) 
panel.xyplot(topo$x,topo$y, cex = 0.5) } ) # see help(Skye) # ternary(Skye/100, ord = c(1, 3, 2)) # 4.3 Enhancing plots attach(wtloss) oldpar <- par(no.readonly = TRUE) # alter margin 4; others are default par(mar = c(5.1, 4.1, 4.1, 4.1)) plot(Days, Weight, type = "p", ylab = "Weight (kg)") Wt.lbs <- pretty(range(Weight*2.205)) axis(side = 4, at = Wt.lbs/2.205, lab = Wt.lbs, las = 0) mtext("Weight (lb)", side = 4, line = 3) detach() par(oldpar) x <- 0:100 plik <- function(lambda) sum(dpois(x, lambda) * 2 * ( (lambda - x) + x * log(pmax(1, x)/lambda))) lambda <- c(1e-8, 0.05, seq(0.1, 5, 0.1)) plot(lambda, sapply(lambda, plik), type = "l", ylim = c(0, 1.4), xlab = expression(lambda), ylab = expression(paste(E[lambda], "(deviance)"))) abline(h = 1, lty = 3) # 4.4 Fine control of graphics ## in R just use swiss # swiss <- data.frame(Fertility = swiss.fertility, swiss.x) attach(swiss) qqnorm(Infant.Mortality) qqline(Infant.Mortality) samp <- cbind(Infant.Mortality, matrix(rnorm(47*19), 47, 19)) samp <- apply(scale(samp), 2, sort) rs <- samp[, 1] xs <- qqnorm(rs, plot = FALSE)$x env <- t(apply(samp[, -1], 1, range)) matplot(xs, cbind(rs, env), type = "pnn", pch = 4, mkh = 0.06, axes = FALSE, xlab = "", ylab = "") xyul <- par("usr") smidge <- min(diff(c(xyul[1], xs, xyul[2])))/2 segments(xs - smidge, env[, 1], xs + smidge, env[, 1]) segments(xs - smidge, env[, 2], xs + smidge, env[, 2]) xul <- trunc(10*xyul[1:2])/10 axis(1, at=seq(xul[1], xul[2], by=0.1), labels = FALSE, tck=0.01) xi <- trunc(xyul[1:2]) axis(1, at = seq(xi[1], xi[2], by = 0.5), tck = 0.02) yul <- trunc(5*xyul[3:4])/5 axis(2, at=seq(yul[1], yul[2], by=0.2), labels = FALSE, tck=0.01) yi <- trunc(xyul[3:4]) axis(2, at = yi[1]:yi[2], tck = 0.02) box(bty = "l") # lower case "L" # ps.options()$fonts # R cannot change font family in a plot. 
mtext("Quantiles of Standard Normal", side=1, line=2.5, font=3) mtext(expression(R[i]), side = 2, line = 2, at = yul[2]) detach() # 4.5 Trellis graphics xyplot(time ~ dist, data = hills, panel = function(x, y, ...) { panel.xyplot(x, y, ...) panel.lmline(x, y, type = "l") panel.abline(lqs(y ~ x), lty = 3) # identify(x, y, row.names(hills)) } ) ## note: don't use separate title() call bwplot(Expt ~ Speed, data = michelson, ylab = "Experiment No.", main = "Speed of Light Data") splom(~ swiss, aspect = "fill", panel = function(x, y, ...) { panel.xyplot(x, y, ...); panel.loess(x, y, ...) } ) sps <- trellis.par.get("superpose.symbol") sps$pch <- 1:7 trellis.par.set("superpose.symbol", sps) xyplot(Time ~ Viscosity, data = stormer, groups = Wt, panel = panel.superpose, type = "b", key = list(columns = 3, text = list(paste(c("Weight: ", "", ""), unique(stormer$Wt), "gms")), points = Rows(sps, 1:3) ) ) rm(sps) topo.plt <- expand.grid(topo.mar) topo.plt$pred <- as.vector(predict(topo.loess, topo.plt)) levelplot(pred ~ x * y, topo.plt, aspect = 1, at = seq(690, 960, 10), xlab = "", ylab = "", panel = function(x, y, subscripts, ...) { panel.levelplot(x, y, subscripts, ...) 
panel.xyplot(topo$x,topo$y, cex = 0.5, col = 1) } ) wireframe(pred ~ x * y, topo.plt, aspect = c(1, 0.5), drape = TRUE, screen = list(z = -150, x = -60), colorkey = list(space="right", height=0.6)) lcrabs.pc <- predict(princomp(log(crabs[,4:8]))) crabs.grp <- c("B", "b", "O", "o")[rep(1:4, each = 50)] splom(~ lcrabs.pc[, 1:3], groups = crabs.grp, panel = panel.superpose, key = list(text = list(c("Blue male", "Blue female", "Orange Male", "Orange female")), points = Rows(trellis.par.get("superpose.symbol"), 1:4), columns = 4) ) sex <- crabs$sex levels(sex) <- c("Female", "Male") sp <- crabs$sp levels(sp) <- c("Blue", "Orange") splom(~ lcrabs.pc[, 1:3] | sp*sex, cex = 0.5, pscales = 0) Quine <- quine levels(Quine$Eth) <- c("Aboriginal", "Non-aboriginal") levels(Quine$Sex) <- c("Female", "Male") levels(Quine$Age) <- c("primary", "first form", "second form", "third form") levels(Quine$Lrn) <- c("Average learner", "Slow learner") bwplot(Age ~ Days | Sex*Lrn*Eth, data = Quine) bwplot(Age ~ Days | Sex*Lrn*Eth, data = Quine, layout = c(4, 2), strip = function(...) strip.default(..., style = 1)) stripplot(Age ~ Days | Sex*Lrn*Eth, data = Quine, jitter = TRUE, layout = c(4, 2)) stripplot(Age ~ Days | Eth*Sex, data = Quine, groups = Lrn, jitter = TRUE, panel = function(x, y, subscripts, jitter.data = FALSE, ...) { if(jitter.data) y <- jitter(as.numeric(y)) panel.superpose(x, y, subscripts, ...) }, xlab = "Days of absence", between = list(y = 1), par.strip.text = list(cex = 0.7), key = list(columns = 2, text = list(levels(Quine$Lrn)), points = Rows(trellis.par.get("superpose.symbol"), 1:2) ), strip = function(...) strip.default(..., strip.names = c(TRUE, TRUE), style = 1) ) fgl0 <- fgl[ ,-10] # omit type. fgl.df <- data.frame(type = rep(fgl$type, 9), y = as.vector(as.matrix(fgl0)), meas = factor(rep(1:9, each = 214), labels = names(fgl0))) stripplot(type ~ y | meas, data = fgl.df, scales = list(x = "free"), xlab = "", cex = 0.5, strip = function(...) 
strip.default(style = 1, ...)) if(FALSE) { # no data supplied xyplot(ratio ~ scant | subject, data = A5, xlab = "scan interval (years)", ylab = "ventricle/brain volume normalized to 1 at start", subscripts = TRUE, ID = A5$ID, strip = function(factor, ...) strip.default(..., factor.levels = labs, style = 1), layout = c(8, 5, 1), skip = c(rep(FALSE, 37), rep(TRUE, 1), rep(FALSE, 1)), panel = function(x, y, subscripts, ID) { panel.xyplot(x, y, type = "b", cex = 0.5) which <- unique(ID[subscripts]) panel.xyplot(c(0, 1.5), pr3[names(pr3) == which], type = "l", lty = 3) if(which == 303 || which == 341) points(1.4, 1.3) }) } Cath <- equal.count(swiss$Catholic, number = 6, overlap = 0.25) xyplot(Fertility ~ Education | Cath, data = swiss, span = 1, layout = c(6, 1), aspect = 1, panel = function(x, y, span) { panel.xyplot(x, y); panel.loess(x, y, span) } ) Cath2 <- equal.count(swiss$Catholic, number = 2, overlap = 0) Agr <- equal.count(swiss$Agric, number = 3, overlap = 0.25) xyplot(Fertility ~ Education | Agr * Cath2, data = swiss, span = 1, aspect = "xy", panel = function(x, y, span) { panel.xyplot(x, y); panel.loess(x, y, span) } ) Cath levels(Cath) plot(Cath, aspect = 0.3) # End of ch04 MASS/inst/scripts/ch03.R0000644000176000001440000000733711754561330014410 0ustar ripleyusers#-*- R -*- ## Script from Fourth Edition of `Modern Applied Statistics with S' # Chapter 3 S Language library(MASS) options(width=65, digits=5, height=9999) # from Chapter 2 powers.of.pi <- pi^(-2:2) names(powers.of.pi) <- -2:2 mymat <- matrix(1:30, 3, 10) myarr <- mymat dim(myarr) <- c(3, 5, 2) dimnames(myarr) <- list(letters[1:3], NULL, c("(i)", "(ii)")) # 3.1 Language layout 1 - pi + exp(1.7) a <- 6 b <- a <- 6 (z <- 1 - pi + exp(1.7)) search() objects() objects(2) find("objects") get("[<-.data.frame", pos = 2) # hills <- hills # only needed in S-PLUS hills$ispeed <- hills$time/hills$dist # 3.2 More on S objects length(letters) Empl <- list(employee = "Anna", spouse = "Fred", children = 3, 
child.ages = c(4, 7, 9)) Empl$employee Empl$child.ages[2] x <- "spouse"; Empl[[x]] unlist(Empl) unlist(Empl, use.names = F) attributes(myarr) attr(myarr, "dim") Empl <- c(Empl, service = 8) c(list(x = 1:3, a = 3:6), list(y = 8:23, b = c(3, 8, 39))) as(powers.of.pi, "vector") as(powers.of.pi, "numeric") is(powers.of.pi, "numeric") as(powers.of.pi, "character") is(powers.of.pi, "vector") as(powers.of.pi, "integer") is(mymat, "array") # 3.3 Arithmetical expressions x <- c(10.4, 5.6, 3.1, 6.4, 21.7) y <- c(x, x) v <- 2 * x + y + 1 s3 <- seq(-5, 5, by = 0.2) s4 <- seq(length = 51, from = -5, by = 0.2) s5 <- rep(x, times = 5) # repeat whole vector s5 <- rep(x, each = 5) # repeat element-by-element x <- 1:4 # puts c(1,2,3,4) into x i <- rep(2, 4) # puts c(2,2,2,2) into i y <- rep(x, 2) # puts c(1,2,3,4,1,2,3,4) into y z <- rep(x, i) # puts c(1,1,2,2,3,3,4,4) into z w <- rep(x, x) # puts c(1,2,2,3,3,3,4,4,4,4) into w ( colc <- rep(1:3, each = 8) ) ( rowc <- rep(rep(1:4, each = 2), 3) ) 1 + (ceiling(1:24/8) - 1) %% 3 -> colc; colc 1 + (ceiling(1:24/2) - 1) %% 4 -> rowc; rowc # or gl(3, 8) gl(4, 2, 24) # 3.4 Character vector operations paste(c("X", "Y"), 1:4) paste(c("X", "Y"), 1:4, sep = "") paste(c("X", "Y"), 1:4, sep = "", collapse = " + ") substring(state.name[44:50], 1, 4) as.vector(abbreviate(state.name[44:50])) as.vector(abbreviate(state.name[44:50], use.classes = FALSE)) grep("na$", state.name) regexpr("na$", state.name) state.name[regexpr("na$", state.name)> 0] # 3.5 Formatting and printing d <- date() cat("Today's date is:", substring(d, 1, 10), substring(d, 25, 28), "\n") cat(1, 2, 3, 4, 5, 6, fill = 8, labels = letters) cat(powers.of.pi, "\n") format(powers.of.pi) cat(format(powers.of.pi), "\n", sep=" ") # 3.6 Calling conventions for functions args(hist.default) # 3.8 Control stuctures yp <- rpois(50, lambda = 1) # full Poisson sample of size 50 table(yp) y <- yp[yp > 0] # truncate the zeros; n = 29 ybar <- mean(y); ybar lam <- ybar it <- 0 # iteration count del 
<- 1 # iterative adjustment while (abs(del) > 0.0001 && (it <- it + 1) < 10) { del <- (lam - ybar*(1 - exp(-lam)))/(1 - ybar*exp(-lam)) lam <- lam - del cat(it, lam, "\n")} # 3.9 Array and matrix operations p <- dbinom(0:4, size = 4, prob = 1/3) # an example CC <- -(p %o% p) diag(CC) <- p + diag(CC) structure(3^8 * CC, dimnames = list(0:4, 0:4)) # convenience apply(iris3, c(2, 3), mean) apply(iris3, c(2, 3), mean, trim = 0.1) apply(iris3, 2, mean) ir.var <- apply(iris3, 3, var) ir.var <- array(ir.var, dim = dim(iris3)[c(2, 2, 3)], dimnames = dimnames(iris3)[c(2, 2, 3)]) matrix(rep(1/50, 50) %*% matrix(iris3, nrow = 50), nrow = 4, dimnames = dimnames(iris3)[-1]) ir.means <- colMeans(iris3) sweep(iris3, c(2, 3), ir.means) log(sweep(iris3, c(2, 3), ir.means, "/")) # 3.10 Introduction to classes and methods methods(summary) # End of ch03 MASS/inst/NEWS0000644000176000001440000002546412620323217012532 0ustar ripleyusersSoftware and datasets to support 'Modern Applied Statistics with S', fourth edition, by W. N. Venables and B. D. Ripley. Springer, 2002, ISBN 0-387-95457-0. This file documents software changes since the third edition. - eqscplot has new arguments ratio and uin. - stepAIC will not drop strata terms in coxph or survreg models. - profile.glm will report inadequate supplied glm fits, not just fail. - new method confint.lm. - fractions/rational allow missing values. - mvrnorm has an 'empirical' argument. - predict.lda and predict.qda try harder to avoid exponential underflow. - new function fitdistr for ML estimation of univariate distributions. - new function glmmPQL to use lme to fit GLMMs by PQL - truehist allows rule for nbins to be specified as a character string. - parcoord function. - new datasets bacteria, epil, nlschools, SP500 - polr allows control argment for optim, reports lack of convergence. - stepAIC works again if formula has an offset (R had changed). - biplot.correspondence now shows the origin as a cross. 
- polr was not preserving contrasts to put in the fit object. - vcov methods for lme, gls, coxph and survReg. - Added 'tol' argument to isoMDS. - stepAIC now allow 'direction=both' starting from a full model. - glm.nb allows R-style 'start' argument. - truehist passes ... on to both plot.default() and rect(). - isoMDS now uses the C interface to optim. - addterm, dropterm, stepAIC now work with lme and gls fits. - huber checks for MAD equal to zero. - glmmPQL now loads nlme if not already loaded. - glmmPQL handles list 'random' arguments (7.0-11). - The MASS datasets no longer require data(foo) to load them. (7.0-11) - mvrnorm uses eigen(EISPACK=TRUE) for back-compatibility (7.0-11, R 1.7.0) - print.summary.polr could lose dimnames for 1 coefficient. - remove heart as survival in R now has it. - confint.{lm,glm} didn't handle specifying parm in all cases. - confint and confint.lm have been migrated to base in R. - addterm.default, dropterm.default and stepAIC work better inside functions. - glm.nb now sets AIC in the object, and has a logLik() method. - truehist now accepts a 'ylab' argument. - negative.binomial and neg.bin no longer generate objects with package:MASS in their environment. - stepAIC now drops (if allowed) 0-df terms sequentially from the right. - lda(CV=TRUE) now works for rank-deficient fits. - predict methods for lda, polr now check newdata types. - model.frame.lda/polr now look for the environment of the original formula. - polr has a new `model' argument defaulting to TRUE. - fitdistr supports the trivial case of a Normal distribution. - sammon and isoMDS now allow missing values in the dissimilarity matrix, and isoMDS allows Minkowski distances in the configuration space. - cov.trob works better if wts are supplied, and may converge a little faster in any case. - The ch11.R script now uses mclust not mclust1998. - The default xlab for boxcox() is now greek lambda. - glmmPQL now handles offset terms. 
- add predict.rlm method to correct predict.lm in the case se.fit=TRUE. - weighted rlm fits are handled better, and default to "inv.var". - logtrans works without specifying 'data'. - predict() method for glmmPQL. - polr() has an option for probit or proportional hazard fits. - neg.bin() and negative.binomial() had an error in the aic() formula. - The ch05.R script now includes the code for Figure 5.8. - Datasets austres, fdeaths, lh, mdeaths, nottem and rock are now visible in the 'datasets' package of R 2.0.0 and so have been removed here. - Script ch07.R now gives details using the gam() function in package gam as well as that in package mgcv. - rlm's fitted component is now always unweighted. - theta.{md,ml,mm} now have one help file with examples. - polr() has a new method "cauchit" suggested by Roger Koenker. (Requires R >= 2.1.0) - polr() now works with transformed intercepts, and usually converges better (contributed by David Firth). - polr() handles a rank-deficient model matrix. - polr() now returns the method used, and uses it for predictions. - anova() method for polr (contributed by John Fox). - predict.glmmPQL was not using the na.action in the object as intended. - The default methods for addterm and dropterm and anova.polr now check for changes in the number of cases in use caused e.g. by na.action=na.omit. - Added vcov() method for rlm fits. - eqscplot() accepts reversed values for xlim and ylim. - Script ch10.R uses se.contrast to calculate se's missing from model.tables. - profile() and confint() methods for polr(). - glm.convert() was not setting the `offset' component that R's glm objects have. - sammon() now checks for duplicates in the initial configuration. - isoMDS() and sammon() work around dropping of names.dist in 2.1.0 - lda() now gives an explicit error message if all group means are the same. 
- fitdistr() now has a logLik() method, chooses the optim() method if not supplied, handles the log-normal by closed-form and no longer attempts to handle the uniform. - glm.nb() now accepts 'mustart'. - glm.nb() now supports weights: they used to be ignored when estimating theta. - fitdistr() now supports geometric and Poisson distributions, and uses closed-form results for the exponential. - lm.ridge, lqs and rlm allow offset() terms. - the 'prior' argument of predict.qda is now operational. - script ch12.R now has b1() adapted for R's contour(). - anova.polr() quoted model dfs, not residual dfs. - stepAIC() applied to a polr fit now gets the correct rdf's in the anova table. - lm.gls() now returns fitted values and residuals on the original coordinates (not the uncorrelated ones). - parcoord() now allows missing values and has a new argument 'var.label' to label the variable axes. (Contributed by Fabian Scheipl.) - rlm() has a 'lqs.control' argument passed to lqs() where used for initialization. - rlm() could fail with some psi functions (e.g. psi.hampel) if 'init' was given as a numeric vector. - rlm() handles weighted fits slightly differently, in particular trying to give the same scale estimate if wt.method="case" as if replicating the cases. - confint.nls copes with plinear models in R (now profile.nls does). - The wrappers lmsreg() etc have been adapted to work in the MASS namespace. - qda() accepts formulae containing backquoted non-syntactic names. - polr() gives an explicit error message if 'start' is misspecified. - glmmPQL() evaluates the formulae for 'fixed' and 'random', which may help if they are given as variables and not values. - There are anova() and logLik() methods for class "glmmPQL" to stop misuse. - profile.polr() now works for a single-coefficient model. - The print and print.summary methods for polr and rlm make use of naprint() to print a message e.g. about deleted observations. 
- Class "ridgelm" now has a coef() method, and works for n < p. - lda() and qda() now check explicitly for non-finite 'x' values. - ch06.R has been updated for multcomp >= 0.991-1 - profile.glm is more likely to find the model frame in complicated scopes. - message() is used for most messages. - truehist() checks more thoroughly for erroneous inputs. - polr(model=TRUE) works again. - add logLik() method for polr. - the summary() methods for classes "negbin" and "rlm" now default to correlation = FALSE. - there is a vcov() method for class "negbin": unlike the "glm" method this defaults to dispersion = 1. - coding for 'sex' in ?Melanoma has been corrected. - the example for gamma.shape has a better starting point and so converges - avoid abbreviation of survreg(dist=) in example(gehan) - profile() and confint() methods for "glm" objects now handle rank-deficient fits. - profile.glm() produced an output in a format plot.profile could not read for single-variable fits. Also for confint() on intercept-only fits. - The print() methods for fitdistr() and lm.ridge() now return invisibly. - vcov() and profile() methods for polr() used starting values in the external not internal parametrization, which could slow convergence. - glm.nb() called theta.ml() incorrect when weights were supplied whch did not sum to n. - removed unused argument 'nseg' to plot.profile. - 'alpha' in the "glm" and "polr" methods for profile() is now interpreted as two-tailed univariate for consistency with other profile methods. - 'mammals': corrected typos in names, some thanks to Arni Magnusson. - profile.glm() now works for binomial glm specified with a matrix response and a cmpletely zero row. - there is a "negbin" method for simulate() - the use of package mclust has been removed from the ch11.R script because of the change of licence conditions for that package. - change ch13.R script for change in package 'survival' 2.35-x. 
- glmmPQL looks up variables in its 'correlation' argument (if a formula) in the usual scope (wish of Ben Bolker: such arguments are unsupported). - added a simulate() method for unweighted polr() fits. - kde2d() allows a length-2 argument 'n'. - the default for truehist(col=) is now set to a colour, not a colour number. - the returned fitted values and (undocumented) linear predictor for polr() did not take any offset into account (reported by Ioannis Kosmides). - the vcov() method for polr() now returns on the zeta scale (suggested by Achim Zeileis). - fitdistr() gains a vcov() method (suggested by Achim Zeileis). - ch06.R has R alternatives to fac.design. - ch11.R has R alternatives for ggobi and factor rotation. - hubers() copes in extreme cases when middle 50% of data is constant. - tests/ now includes dataset for polr.R, so checking depends only on base packages and lattice. - The "glm" method for profile() failed when given a binomial model with a two-column response. - fitdistr() works harder to rescale the problem when fitting a gamma. - cov.trob() handles zero weights without giving a warning (reported by John Fox). - boxcox() works better when 'y' is very badly scaled, e.g. around 1e-16 (patch by Martin Maechler). - mvrnorm() no longer defaults to the deprecated EISPACK=TRUE (and hence changes the results). It gains an argument 'EISPACK' for back-compatibility. - the "polr" method for profile() could lose dimensions in its return object (reported by Joris Meys) - kde2d() throws an error if given zero bandwidths or constant data. - ldahist(sep = TRUE) was missing a dev.flush(). - addterm.glm() mis-calculated F statistics for df > 1. - anova.loglm() needed revision for changes in R. - the addterm() default method allows update() to fail. - polr(method = "cloglog") implemented what is more commonly called the log-log link. Now both are provided. - lqs() fits with intercepts and contrasts lost the latter from the return value. 
- addterm() and dropterm() now handle empty scopes, transparently. - glmmPQL uses requireNamespace('nlme'), not require(). - mvnorm(EISPACK = TRUE) is now an error: it is unsupported by R >= 3.1.0. - the log-likelihood returned for fitdistr(, "geometric") was incorrect. MASS/tests/0000755000176000001440000000000012620323725012211 5ustar ripleyusersMASS/tests/regression.R0000644000176000001440000000021312204377337014516 0ustar ripleyusers### regression tests library(MASS) contr.sdif(6) contr.sdif(6, sparse=TRUE) stopifnot(all(contr.sdif(6) == contr.sdif(6, sparse=TRUE))) MASS/tests/hubers.R0000644000176000001440000000041111754561323013625 0ustar ripleyuserslibrary(MASS) x <- c(rep(1.407,3), rep(1.422,4), 1.597, 1.597, rep(1.64,19), 1.649, 1.664, 1.664, 1.67, 1.672, 1.672, 1.787, 4.42, 4.44, rep(6.55,3), rep(6.79,14), rep(7.19,,73), 9.00, 9.38, rep(9.41,10)) hubers(x) # failed to converge in MASS 7.3-9. MASS/tests/Examples/0000755000176000001440000000000012536567653014007 5ustar ripleyusersMASS/tests/Examples/MASS-Ex.Rout.save0000644000176000001440000047643012536567614016747 0ustar ripleyusers R version 3.2.1 RC (2015-06-10 r68507) -- "World-Famous Astronaut" Copyright (C) 2015 The R Foundation for Statistical Computing Platform: x86_64-unknown-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. Natural language support but running in an English locale R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. 
> pkgname <- "MASS" > source(file.path(R.home("share"), "R", "examples-header.R")) > options(warn = 1) > library('MASS') > > base::assign(".oldSearch", base::search(), pos = 'CheckExEnv') > cleanEx() > nameEx("Insurance") > ### * Insurance > > flush(stderr()); flush(stdout()) > > ### Name: Insurance > ### Title: Numbers of Car Insurance claims > ### Aliases: Insurance > ### Keywords: datasets > > ### ** Examples > > ## main-effects fit as Poisson GLM with offset > glm(Claims ~ District + Group + Age + offset(log(Holders)), + data = Insurance, family = poisson) Call: glm(formula = Claims ~ District + Group + Age + offset(log(Holders)), family = poisson, data = Insurance) Coefficients: (Intercept) District2 District3 District4 Group.L Group.Q -1.810508 0.025868 0.038524 0.234205 0.429708 0.004632 Group.C Age.L Age.Q Age.C -0.029294 -0.394432 -0.000355 -0.016737 Degrees of Freedom: 63 Total (i.e. Null); 54 Residual Null Deviance: 236.3 Residual Deviance: 51.42 AIC: 388.7 > > # same via loglm > loglm(Claims ~ District + Group + Age + offset(log(Holders)), + data = Insurance) Call: loglm(formula = Claims ~ District + Group + Age + offset(log(Holders)), data = Insurance) Statistics: X^2 df P(> X^2) Likelihood Ratio 51.42003 54 0.5745071 Pearson 48.62933 54 0.6809086 > > > > cleanEx() > nameEx("Null") > ### * Null > > flush(stderr()); flush(stdout()) > > ### Name: Null > ### Title: Null Spaces of Matrices > ### Aliases: Null > ### Keywords: algebra > > ### ** Examples > > # The function is currently defined as > function(M) + { + tmp <- qr(M) + set <- if(tmp$rank == 0L) seq_len(ncol(M)) else -seq_len(tmp$rank) + qr.Q(tmp, complete = TRUE)[, set, drop = FALSE] + } function (M) { tmp <- qr(M) set <- if (tmp$rank == 0L) seq_len(ncol(M)) else -seq_len(tmp$rank) qr.Q(tmp, complete = TRUE)[, set, drop = FALSE] } > > > > cleanEx() > nameEx("OME") > ### * OME > > flush(stderr()); flush(stdout()) > > ### Name: OME > ### Title: Tests of Auditory Perception in Children with OME > 
### Aliases: OME > ### Keywords: datasets > > ### ** Examples > > # Fit logistic curve from p = 0.5 to p = 1.0 > fp1 <- deriv(~ 0.5 + 0.5/(1 + exp(-(x-L75)/scal)), + c("L75", "scal"), + function(x,L75,scal)NULL) > nls(Correct/Trials ~ fp1(Loud, L75, scal), data = OME, + start = c(L75=45, scal=3)) Nonlinear regression model model: Correct/Trials ~ fp1(Loud, L75, scal) data: OME L75 scal 44.149 3.775 residual sum-of-squares: 69.88 Number of iterations to convergence: 4 Achieved convergence tolerance: 7.016e-06 > nls(Correct/Trials ~ fp1(Loud, L75, scal), + data = OME[OME$Noise == "coherent",], + start=c(L75=45, scal=3)) Nonlinear regression model model: Correct/Trials ~ fp1(Loud, L75, scal) data: OME[OME$Noise == "coherent", ] L75 scal 47.993 1.259 residual sum-of-squares: 30.35 Number of iterations to convergence: 5 Achieved convergence tolerance: 4.895e-06 > nls(Correct/Trials ~ fp1(Loud, L75, scal), + data = OME[OME$Noise == "incoherent",], + start = c(L75=45, scal=3)) Nonlinear regression model model: Correct/Trials ~ fp1(Loud, L75, scal) data: OME[OME$Noise == "incoherent", ] L75 scal 38.87 2.17 residual sum-of-squares: 23.73 Number of iterations to convergence: 11 Achieved convergence tolerance: 3.846e-06 > > # individual fits for each experiment > > aa <- factor(OME$Age) > ab <- 10*OME$ID + unclass(aa) > ac <- unclass(factor(ab)) > OME$UID <- as.vector(ac) > OME$UIDn <- OME$UID + 0.1*(OME$Noise == "incoherent") > rm(aa, ab, ac) > OMEi <- OME > > library(nlme) > fp2 <- deriv(~ 0.5 + 0.5/(1 + exp(-(x-L75)/2)), + "L75", function(x,L75) NULL) > dec <- getOption("OutDec") > options(show.error.messages = FALSE, OutDec=".") > OMEi.nls <- nlsList(Correct/Trials ~ fp2(Loud, L75) | UIDn, + data = OMEi, start = list(L75=45), control = list(maxiter=100)) > options(show.error.messages = TRUE, OutDec=dec) > tmp <- sapply(OMEi.nls, function(X) + {if(is.null(X)) NA else as.vector(coef(X))}) > OMEif <- data.frame(UID = round(as.numeric((names(tmp)))), + Noise = 
rep(c("coherent", "incoherent"), 110), + L75 = as.vector(tmp), stringsAsFactors = TRUE) > OMEif$Age <- OME$Age[match(OMEif$UID, OME$UID)] > OMEif$OME <- OME$OME[match(OMEif$UID, OME$UID)] > OMEif <- OMEif[OMEif$L75 > 30,] > summary(lm(L75 ~ Noise/Age, data = OMEif, na.action = na.omit)) Call: lm(formula = L75 ~ Noise/Age, data = OMEif, na.action = na.omit) Residuals: Min 1Q Median 3Q Max -13.0022 -1.9878 0.3346 2.0229 16.3260 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 47.73580 0.76456 62.435 < 2e-16 *** Noiseincoherent -4.87352 1.11247 -4.381 1.92e-05 *** Noisecoherent:Age -0.02785 0.02349 -1.186 0.237 Noiseincoherent:Age -0.12219 0.02589 -4.719 4.50e-06 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Residual standard error: 3.774 on 196 degrees of freedom (17 observations deleted due to missingness) Multiple R-squared: 0.5246, Adjusted R-squared: 0.5173 F-statistic: 72.09 on 3 and 196 DF, p-value: < 2.2e-16 > summary(lm(L75 ~ Noise/(Age + OME), data = OMEif, + subset = (Age >= 30 & Age <= 60), + na.action = na.omit), cor = FALSE) Call: lm(formula = L75 ~ Noise/(Age + OME), data = OMEif, subset = (Age >= 30 & Age <= 60), na.action = na.omit) Residuals: Min 1Q Median 3Q Max -10.4514 -2.0588 0.0194 1.6827 15.9738 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 50.21090 1.74482 28.777 < 2e-16 *** Noiseincoherent -5.97491 2.70148 -2.212 0.02890 * Noisecoherent:Age -0.09358 0.03586 -2.609 0.01023 * Noiseincoherent:Age -0.15155 0.04151 -3.651 0.00039 *** Noisecoherent:OMElow 0.45103 1.07594 0.419 0.67583 Noiseincoherent:OMElow -0.14075 1.24537 -0.113 0.91021 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Residual standard error: 3.7 on 119 degrees of freedom (17 observations deleted due to missingness) Multiple R-squared: 0.6073, Adjusted R-squared: 0.5908 F-statistic: 36.81 on 5 and 119 DF, p-value: < 2.2e-16 > > # Or fit by weighted least squares > fpl75 <- deriv(~ sqrt(n)*(r/n - 0.5 - 0.5/(1 + exp(-(x-L75)/scal))), + c("L75", "scal"), + function(r,n,x,L75,scal) NULL) > nls(0 ~ fpl75(Correct, Trials, Loud, L75, scal), + data = OME[OME$Noise == "coherent",], + start = c(L75=45, scal=3)) Nonlinear regression model model: 0 ~ fpl75(Correct, Trials, Loud, L75, scal) data: OME[OME$Noise == "coherent", ] L75 scal 47.798 1.296 residual sum-of-squares: 91.72 Number of iterations to convergence: 5 Achieved convergence tolerance: 9.302e-06 > nls(0 ~ fpl75(Correct, Trials, Loud, L75, scal), + data = OME[OME$Noise == "incoherent",], + start = c(L75=45, scal=3)) Nonlinear regression model model: 0 ~ fpl75(Correct, Trials, Loud, L75, scal) data: OME[OME$Noise == "incoherent", ] L75 scal 38.553 2.078 residual sum-of-squares: 60.19 Number of iterations to convergence: 8 Achieved convergence tolerance: 4.55e-06 > > # Test to see if the curves shift with age > fpl75age <- deriv(~sqrt(n)*(r/n - 0.5 - 0.5/(1 + + exp(-(x-L75-slope*age)/scal))), + c("L75", "slope", "scal"), + function(r,n,x,age,L75,slope,scal) NULL) > OME.nls1 <- + nls(0 ~ fpl75age(Correct, Trials, Loud, Age, L75, slope, scal), + data = OME[OME$Noise == "coherent",], + start = c(L75=45, slope=0, scal=2)) > sqrt(diag(vcov(OME.nls1))) L75 slope scal 0.61091761 0.01665916 0.17566450 > > OME.nls2 <- + nls(0 ~ fpl75age(Correct, Trials, Loud, Age, L75, slope, scal), + data = OME[OME$Noise == "incoherent",], + start = c(L75=45, slope=0, scal=2)) > sqrt(diag(vcov(OME.nls2))) L75 slope scal 0.49553854 0.01348281 0.24453836 > > # Now allow random effects by using NLME > OMEf <- OME[rep(1:nrow(OME), OME$Trials),] > OMEf$Resp <- with(OME, rep(rep(c(1,0), length(Trials)), + 
t(cbind(Correct, Trials-Correct)))) > OMEf <- OMEf[, -match(c("Correct", "Trials"), names(OMEf))] > > ## Not run: > ##D ## these fail in R on most platforms > ##D fp2 <- deriv(~ 0.5 + 0.5/(1 + exp(-(x-L75)/exp(lsc))), > ##D c("L75", "lsc"), > ##D function(x, L75, lsc) NULL) > ##D try(summary(nlme(Resp ~ fp2(Loud, L75, lsc), > ##D fixed = list(L75 ~ Age, lsc ~ 1), > ##D random = L75 + lsc ~ 1 | UID, > ##D data = OMEf[OMEf$Noise == "coherent",], method = "ML", > ##D start = list(fixed=c(L75=c(48.7, -0.03), lsc=0.24)), verbose = TRUE))) > ##D > ##D try(summary(nlme(Resp ~ fp2(Loud, L75, lsc), > ##D fixed = list(L75 ~ Age, lsc ~ 1), > ##D random = L75 + lsc ~ 1 | UID, > ##D data = OMEf[OMEf$Noise == "incoherent",], method = "ML", > ##D start = list(fixed=c(L75=c(41.5, -0.1), lsc=0)), verbose = TRUE))) > ## End(Not run) > > > cleanEx() detaching ‘package:nlme’ > nameEx("Skye") > ### * Skye > > flush(stderr()); flush(stdout()) > > ### Name: Skye > ### Title: AFM Compositions of Aphyric Skye Lavas > ### Aliases: Skye > ### Keywords: datasets > > ### ** Examples > > # ternary() is from the on-line answers. > ternary <- function(X, pch = par("pch"), lcex = 1, + add = FALSE, ord = 1:3, ...) + { + X <- as.matrix(X) + if(any(X < 0)) stop("X must be non-negative") + s <- drop(X %*% rep(1, ncol(X))) + if(any(s<=0)) stop("each row of X must have a positive sum") + if(max(abs(s-1)) > 1e-6) { + warning("row(s) of X will be rescaled") + X <- X / s + } + X <- X[, ord] + s3 <- sqrt(1/3) + if(!add) + { + oldpty <- par("pty") + on.exit(par(pty=oldpty)) + par(pty="s") + plot(c(-s3, s3), c(0.5-s3, 0.5+s3), type="n", axes=FALSE, + xlab="", ylab="") + polygon(c(0, -s3, s3), c(1, 0, 0), density=0) + lab <- NULL + if(!is.null(dn <- dimnames(X))) lab <- dn[[2]] + if(length(lab) < 3) lab <- as.character(1:3) + eps <- 0.05 * lcex + text(c(0, s3+eps*0.7, -s3-eps*0.7), + c(1+eps, -0.1*eps, -0.1*eps), lab, cex=lcex) + } + points((X[,2] - X[,3])*s3, X[,1], ...) 
+ } > > ternary(Skye/100, ord=c(1,3,2)) > > > > graphics::par(get("par.postscript", pos = 'CheckExEnv')) > cleanEx() > nameEx("addterm") > ### * addterm > > flush(stderr()); flush(stdout()) > > ### Name: addterm > ### Title: Try All One-Term Additions to a Model > ### Aliases: addterm addterm.default addterm.glm addterm.lm > ### Keywords: models > > ### ** Examples > > quine.hi <- aov(log(Days + 2.5) ~ .^4, quine) > quine.lo <- aov(log(Days+2.5) ~ 1, quine) > addterm(quine.lo, quine.hi, test="F") Single term additions Model: log(Days + 2.5) ~ 1 Df Sum of Sq RSS AIC F Value Pr(F) 106.787 -43.664 Eth 1 10.6820 96.105 -57.052 16.0055 0.0001006 *** Sex 1 0.5969 106.190 -42.483 0.8094 0.3698057 Age 3 4.7469 102.040 -44.303 2.2019 0.0904804 . Lrn 1 0.0043 106.783 -41.670 0.0058 0.9392083 --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > house.glm0 <- glm(Freq ~ Infl*Type*Cont + Sat, family=poisson, + data=housing) > addterm(house.glm0, ~. + Sat:(Infl+Type+Cont), test="Chisq") Single term additions Model: Freq ~ Infl * Type * Cont + Sat Df Deviance AIC LRT Pr(Chi) 217.46 610.43 Infl:Sat 4 111.08 512.05 106.371 < 2.2e-16 *** Type:Sat 6 156.79 561.76 60.669 3.292e-11 *** Cont:Sat 2 212.33 609.30 5.126 0.07708 . --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > house.glm1 <- update(house.glm0, . ~ . + Sat*(Infl+Type+Cont)) > addterm(house.glm1, ~. + Sat:(Infl+Type+Cont)^2, test = "Chisq") Single term additions Model: Freq ~ Infl + Type + Cont + Sat + Infl:Type + Infl:Cont + Type:Cont + Infl:Sat + Type:Sat + Cont:Sat + Infl:Type:Cont Df Deviance AIC LRT Pr(Chi) 38.662 455.63 Infl:Type:Sat 12 16.107 457.08 22.5550 0.03175 * Infl:Cont:Sat 4 37.472 462.44 1.1901 0.87973 Type:Cont:Sat 6 28.256 457.23 10.4064 0.10855 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > > > cleanEx() > nameEx("anova.negbin") > ### * anova.negbin > > flush(stderr()); flush(stdout()) > > ### Name: anova.negbin > ### Title: Likelihood Ratio Tests for Negative Binomial GLMs > ### Aliases: anova.negbin > ### Keywords: regression > > ### ** Examples > > m1 <- glm.nb(Days ~ Eth*Age*Lrn*Sex, quine, link = log) > m2 <- update(m1, . ~ . - Eth:Age:Lrn:Sex) > anova(m2, m1) Likelihood ratio tests of Negative Binomial Models Response: Days Model 1 Eth + Age + Lrn + Sex + Eth:Age + Eth:Lrn + Age:Lrn + Eth:Sex + Age:Sex + Lrn:Sex + Eth:Age:Lrn + Eth:Age:Sex + Eth:Lrn:Sex + Age:Lrn:Sex 2 Eth * Age * Lrn * Sex theta Resid. df 2 x log-lik. Test df LR stat. Pr(Chi) 1 1.90799 120 -1040.728 2 1.92836 118 -1039.324 1 vs 2 2 1.403843 0.4956319 > anova(m2) Warning in anova.negbin(m2) : tests made without re-estimating 'theta' Analysis of Deviance Table Model: Negative Binomial(1.908), link: log Response: Days Terms added sequentially (first to last) Df Deviance Resid. Df Resid. Dev Pr(>Chi) NULL 145 270.03 Eth 1 19.0989 144 250.93 1.241e-05 *** Age 3 16.3483 141 234.58 0.000962 *** Lrn 1 3.5449 140 231.04 0.059730 . Sex 1 0.3989 139 230.64 0.527666 Eth:Age 3 14.6030 136 216.03 0.002189 ** Eth:Lrn 1 0.0447 135 215.99 0.832601 Age:Lrn 2 1.7482 133 214.24 0.417240 Eth:Sex 1 1.1470 132 213.09 0.284183 Age:Sex 3 21.9746 129 191.12 6.603e-05 *** Lrn:Sex 1 0.0277 128 191.09 0.867712 Eth:Age:Lrn 2 9.0099 126 182.08 0.011054 * Eth:Age:Sex 3 4.8218 123 177.26 0.185319 Eth:Lrn:Sex 1 3.3160 122 173.94 0.068608 . Age:Lrn:Sex 2 6.3941 120 167.55 0.040882 * --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > > > cleanEx() > nameEx("area") > ### * area > > flush(stderr()); flush(stdout()) > > ### Name: area > ### Title: Adaptive Numerical Integration > ### Aliases: area > ### Keywords: nonlinear > > ### ** Examples > > area(sin, 0, pi) # integrate the sin function from 0 to pi. 
[1] 2 > > > > cleanEx() > nameEx("bacteria") > ### * bacteria > > flush(stderr()); flush(stdout()) > > ### Name: bacteria > ### Title: Presence of Bacteria after Drug Treatments > ### Aliases: bacteria > ### Keywords: datasets > > ### ** Examples > > contrasts(bacteria$trt) <- structure(contr.sdif(3), + dimnames = list(NULL, c("drug", "encourage"))) > ## fixed effects analyses > summary(glm(y ~ trt * week, binomial, data = bacteria)) Call: glm(formula = y ~ trt * week, family = binomial, data = bacteria) Deviance Residuals: Min 1Q Median 3Q Max -2.2144 0.4245 0.5373 0.6750 1.0697 Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) 1.97548 0.30053 6.573 4.92e-11 *** trtdrug -0.99848 0.69490 -1.437 0.15075 trtencourage 0.83865 0.73482 1.141 0.25374 week -0.11814 0.04460 -2.649 0.00807 ** trtdrug:week -0.01722 0.10570 -0.163 0.87061 trtencourage:week -0.07043 0.10964 -0.642 0.52060 --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for binomial family taken to be 1) Null deviance: 217.38 on 219 degrees of freedom Residual deviance: 203.12 on 214 degrees of freedom AIC: 215.12 Number of Fisher Scoring iterations: 4 > summary(glm(y ~ trt + week, binomial, data = bacteria)) Call: glm(formula = y ~ trt + week, family = binomial, data = bacteria) Deviance Residuals: Min 1Q Median 3Q Max -2.2899 0.3885 0.5400 0.7027 1.1077 Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) 1.96018 0.29705 6.599 4.15e-11 *** trtdrug -1.10667 0.42519 -2.603 0.00925 ** trtencourage 0.45502 0.42766 1.064 0.28735 week -0.11577 0.04414 -2.623 0.00872 ** --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for binomial family taken to be 1) Null deviance: 217.38 on 219 degrees of freedom Residual deviance: 203.81 on 216 degrees of freedom AIC: 211.81 Number of Fisher Scoring iterations: 4 > summary(glm(y ~ trt + I(week > 2), binomial, data = bacteria)) Call: glm(formula = y ~ trt + I(week > 2), family = binomial, data = bacteria) Deviance Residuals: Min 1Q Median 3Q Max -2.4043 0.3381 0.5754 0.6237 1.0051 Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) 2.2479 0.3560 6.315 2.71e-10 *** trtdrug -1.1187 0.4288 -2.609 0.00909 ** trtencourage 0.4815 0.4330 1.112 0.26614 I(week > 2)TRUE -1.2949 0.4104 -3.155 0.00160 ** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for binomial family taken to be 1) Null deviance: 217.38 on 219 degrees of freedom Residual deviance: 199.18 on 216 degrees of freedom AIC: 207.18 Number of Fisher Scoring iterations: 5 > > # conditional random-effects analysis > library(survival) > bacteria$Time <- rep(1, nrow(bacteria)) > coxph(Surv(Time, unclass(y)) ~ week + strata(ID), + data = bacteria, method = "exact") Call: coxph(formula = Surv(Time, unclass(y)) ~ week + strata(ID), data = bacteria, method = "exact") coef exp(coef) se(coef) z p week -0.1626 0.8500 0.0547 -2.97 0.003 Likelihood ratio test=9.85 on 1 df, p=0.0017 n= 220, number of events= 177 > coxph(Surv(Time, unclass(y)) ~ factor(week) + strata(ID), + data = bacteria, method = "exact") Call: coxph(formula = Surv(Time, unclass(y)) ~ factor(week) + strata(ID), data = bacteria, method = "exact") coef exp(coef) se(coef) z p factor(week)2 0.198 1.219 0.724 0.27 0.784 factor(week)4 -1.421 0.242 0.667 -2.13 0.033 factor(week)6 -1.661 0.190 0.682 -2.43 0.015 factor(week)11 -1.675 0.187 0.678 -2.47 0.013 Likelihood ratio test=15.4 on 4 df, p=0.00385 n= 220, number of events= 177 > coxph(Surv(Time, unclass(y)) ~ I(week > 2) + strata(ID), + data = bacteria, method = 
"exact") Call: coxph(formula = Surv(Time, unclass(y)) ~ I(week > 2) + strata(ID), data = bacteria, method = "exact") coef exp(coef) se(coef) z p I(week > 2)TRUE -1.670 0.188 0.482 -3.47 0.00053 Likelihood ratio test=15.2 on 1 df, p=9.93e-05 n= 220, number of events= 177 > > # PQL glmm analysis > library(nlme) > summary(glmmPQL(y ~ trt + I(week > 2), random = ~ 1 | ID, + family = binomial, data = bacteria)) iteration 1 iteration 2 iteration 3 iteration 4 iteration 5 iteration 6 Linear mixed-effects model fit by maximum likelihood Data: bacteria AIC BIC logLik NA NA NA Random effects: Formula: ~1 | ID (Intercept) Residual StdDev: 1.410637 0.7800511 Variance function: Structure: fixed weights Formula: ~invwt Fixed effects: y ~ trt + I(week > 2) Value Std.Error DF t-value p-value (Intercept) 2.7447864 0.3784193 169 7.253294 0.0000 trtdrug -1.2473553 0.6440635 47 -1.936696 0.0588 trtencourage 0.4930279 0.6699339 47 0.735935 0.4654 I(week > 2)TRUE -1.6072570 0.3583379 169 -4.485311 0.0000 Correlation: (Intr) trtdrg trtncr trtdrug 0.009 trtencourage 0.036 -0.518 I(week > 2)TRUE -0.710 0.047 -0.046 Standardized Within-Group Residuals: Min Q1 Med Q3 Max -5.1985361 0.1572336 0.3513075 0.4949482 1.7448845 Number of Observations: 220 Number of Groups: 50 > > > > cleanEx() detaching ‘package:nlme’, ‘package:survival’ > nameEx("bandwidth.nrd") > ### * bandwidth.nrd > > flush(stderr()); flush(stdout()) > > ### Name: bandwidth.nrd > ### Title: Bandwidth for density() via Normal Reference Distribution > ### Aliases: bandwidth.nrd > ### Keywords: dplot > > ### ** Examples > > # The function is currently defined as > function(x) + { + r <- quantile(x, c(0.25, 0.75)) + h <- (r[2] - r[1])/1.34 + 4 * 1.06 * min(sqrt(var(x)), h) * length(x)^(-1/5) + } function (x) { r <- quantile(x, c(0.25, 0.75)) h <- (r[2] - r[1])/1.34 4 * 1.06 * min(sqrt(var(x)), h) * length(x)^(-1/5) } > > > > cleanEx() > nameEx("bcv") > ### * bcv > > flush(stderr()); flush(stdout()) > > ### Name: bcv > ### Title: 
Biased Cross-Validation for Bandwidth Selection > ### Aliases: bcv > ### Keywords: dplot > > ### ** Examples > > bcv(geyser$duration) [1] 0.8940809 > > > > cleanEx() > nameEx("beav1") > ### * beav1 > > flush(stderr()); flush(stdout()) > > ### Name: beav1 > ### Title: Body Temperature Series of Beaver 1 > ### Aliases: beav1 > ### Keywords: datasets > > ### ** Examples > > beav1 <- within(beav1, + hours <- 24*(day-346) + trunc(time/100) + (time%%100)/60) > plot(beav1$hours, beav1$temp, type="l", xlab="time", + ylab="temperature", main="Beaver 1") > usr <- par("usr"); usr[3:4] <- c(-0.2, 8); par(usr=usr) > lines(beav1$hours, beav1$activ, type="s", lty=2) > temp <- ts(c(beav1$temp[1:82], NA, beav1$temp[83:114]), + start = 9.5, frequency = 6) > activ <- ts(c(beav1$activ[1:82], NA, beav1$activ[83:114]), + start = 9.5, frequency = 6) > > acf(temp[1:53]) > acf(temp[1:53], type = "partial") > ar(temp[1:53]) Call: ar(x = temp[1:53]) Coefficients: 1 0.8222 Order selected 1 sigma^2 estimated as 0.01011 > act <- c(rep(0, 10), activ) > X <- cbind(1, act = act[11:125], act1 = act[10:124], + act2 = act[9:123], act3 = act[8:122]) > alpha <- 0.80 > stemp <- as.vector(temp - alpha*lag(temp, -1)) > sX <- X[-1, ] - alpha * X[-115,] > beav1.ls <- lm(stemp ~ -1 + sX, na.action = na.omit) > summary(beav1.ls, cor = FALSE) Call: lm(formula = stemp ~ -1 + sX, na.action = na.omit) Residuals: Min 1Q Median 3Q Max -0.21317 -0.04317 0.00683 0.05483 0.37683 Coefficients: Estimate Std. Error t value Pr(>|t|) sX 36.85587 0.03922 939.833 < 2e-16 *** sXact 0.25400 0.03930 6.464 3.37e-09 *** sXact1 0.17096 0.05100 3.352 0.00112 ** sXact2 0.16202 0.05147 3.148 0.00215 ** sXact3 0.10548 0.04310 2.448 0.01605 * --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Residual standard error: 0.08096 on 104 degrees of freedom (5 observations deleted due to missingness) Multiple R-squared: 0.9999, Adjusted R-squared: 0.9999 F-statistic: 1.81e+05 on 5 and 104 DF, p-value: < 2.2e-16 > rm(temp, activ) > > > > graphics::par(get("par.postscript", pos = 'CheckExEnv')) > cleanEx() > nameEx("beav2") > ### * beav2 > > flush(stderr()); flush(stdout()) > > ### Name: beav2 > ### Title: Body Temperature Series of Beaver 2 > ### Aliases: beav2 > ### Keywords: datasets > > ### ** Examples > > attach(beav2) > beav2$hours <- 24*(day-307) + trunc(time/100) + (time%%100)/60 > plot(beav2$hours, beav2$temp, type = "l", xlab = "time", + ylab = "temperature", main = "Beaver 2") > usr <- par("usr"); usr[3:4] <- c(-0.2, 8); par(usr = usr) > lines(beav2$hours, beav2$activ, type = "s", lty = 2) > > temp <- ts(temp, start = 8+2/3, frequency = 6) > activ <- ts(activ, start = 8+2/3, frequency = 6) > acf(temp[activ == 0]); acf(temp[activ == 1]) # also look at PACFs > ar(temp[activ == 0]); ar(temp[activ == 1]) Call: ar(x = temp[activ == 0]) Coefficients: 1 0.7392 Order selected 1 sigma^2 estimated as 0.02011 Call: ar(x = temp[activ == 1]) Coefficients: 1 0.7894 Order selected 1 sigma^2 estimated as 0.01792 > > arima(temp, order = c(1,0,0), xreg = activ) Call: arima(x = temp, order = c(1, 0, 0), xreg = activ) Coefficients: ar1 intercept activ 0.8733 37.1920 0.6139 s.e. 0.0684 0.1187 0.1381 sigma^2 estimated as 0.01518: log likelihood = 66.78, aic = -125.55 > dreg <- cbind(sin = sin(2*pi*beav2$hours/24), cos = cos(2*pi*beav2$hours/24)) > arima(temp, order = c(1,0,0), xreg = cbind(active=activ, dreg)) Call: arima(x = temp, order = c(1, 0, 0), xreg = cbind(active = activ, dreg)) Coefficients: ar1 intercept active dreg.sin dreg.cos 0.7905 37.1674 0.5322 -0.282 0.1201 s.e. 
0.0681 0.0939 0.1282 0.105 0.0997 sigma^2 estimated as 0.01434: log likelihood = 69.83, aic = -127.67 > > library(nlme) # for gls and corAR1 > beav2.gls <- gls(temp ~ activ, data = beav2, corr = corAR1(0.8), + method = "ML") > summary(beav2.gls) Generalized least squares fit by maximum likelihood Model: temp ~ activ Data: beav2 AIC BIC logLik -125.5505 -115.1298 66.77523 Correlation Structure: AR(1) Formula: ~1 Parameter estimate(s): Phi 0.8731771 Coefficients: Value Std.Error t-value p-value (Intercept) 37.19195 0.1131328 328.7460 0 activ 0.61418 0.1087286 5.6487 0 Correlation: (Intr) activ -0.582 Standardized residuals: Min Q1 Med Q3 Max -2.42080780 -0.61510520 -0.03573836 0.81641138 2.15153499 Residual standard error: 0.2527856 Degrees of freedom: 100 total; 98 residual > summary(update(beav2.gls, subset = 6:100)) Generalized least squares fit by maximum likelihood Model: temp ~ activ Data: beav2 Subset: 6:100 AIC BIC logLik -124.981 -114.7654 66.49048 Correlation Structure: AR(1) Formula: ~1 Parameter estimate(s): Phi 0.8380448 Coefficients: Value Std.Error t-value p-value (Intercept) 37.25001 0.09634047 386.6496 0 activ 0.60277 0.09931904 6.0690 0 Correlation: (Intr) activ -0.657 Standardized residuals: Min Q1 Med Q3 Max -2.0231494 -0.8910348 -0.1497564 0.7640939 2.2719468 Residual standard error: 0.2188542 Degrees of freedom: 95 total; 93 residual > detach("beav2"); rm(temp, activ) > > > > graphics::par(get("par.postscript", pos = 'CheckExEnv')) > cleanEx() detaching ‘package:nlme’ > nameEx("birthwt") > ### * birthwt > > flush(stderr()); flush(stdout()) > > ### Name: birthwt > ### Title: Risk Factors Associated with Low Infant Birth Weight > ### Aliases: birthwt > ### Keywords: datasets > > ### ** Examples > > bwt <- with(birthwt, { + race <- factor(race, labels = c("white", "black", "other")) + ptd <- factor(ptl > 0) + ftv <- factor(ftv) + levels(ftv)[-(1:2)] <- "2+" + data.frame(low = factor(low), age, lwt, race, smoke = (smoke > 0), + ptd, ht = (ht > 0), 
ui = (ui > 0), ftv) + }) > options(contrasts = c("contr.treatment", "contr.poly")) > glm(low ~ ., binomial, bwt) Call: glm(formula = low ~ ., family = binomial, data = bwt) Coefficients: (Intercept) age lwt raceblack raceother smokeTRUE 0.82302 -0.03723 -0.01565 1.19241 0.74068 0.75553 ptdTRUE htTRUE uiTRUE ftv1 ftv2+ 1.34376 1.91317 0.68020 -0.43638 0.17901 Degrees of Freedom: 188 Total (i.e. Null); 178 Residual Null Deviance: 234.7 Residual Deviance: 195.5 AIC: 217.5 > > > > base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly")) > cleanEx() > nameEx("boxcox") > ### * boxcox > > flush(stderr()); flush(stdout()) > > ### Name: boxcox > ### Title: Box-Cox Transformations for Linear Models > ### Aliases: boxcox boxcox.default boxcox.formula boxcox.lm > ### Keywords: regression models hplot > > ### ** Examples > > boxcox(Volume ~ log(Height) + log(Girth), data = trees, + lambda = seq(-0.25, 0.25, length = 10)) > > boxcox(Days+1 ~ Eth*Sex*Age*Lrn, data = quine, + lambda = seq(-0.05, 0.45, len = 20)) > > > > cleanEx() > nameEx("caith") > ### * caith > > flush(stderr()); flush(stdout()) > > ### Name: caith > ### Title: Colours of Eyes and Hair of People in Caithness > ### Aliases: caith > ### Keywords: datasets > > ### ** Examples > > corresp(caith) First canonical correlation(s): 0.4463684 Row scores: blue light medium dark -0.89679252 -0.98731818 0.07530627 1.57434710 Column scores: fair red medium dark black -1.21871379 -0.52257500 -0.09414671 1.31888486 2.45176017 > dimnames(caith)[[2]] <- c("F", "R", "M", "D", "B") > par(mfcol=c(1,3)) > plot(corresp(caith, nf=2)); title("symmetric") > plot(corresp(caith, nf=2), type="rows"); title("rows") > plot(corresp(caith, nf=2), type="col"); title("columns") > par(mfrow=c(1,1)) > > > > graphics::par(get("par.postscript", pos = 'CheckExEnv')) > cleanEx() > nameEx("cement") > ### * cement > > flush(stderr()); flush(stdout()) > > ### Name: cement > ### Title: Heat Evolved by Setting Cements > ### 
Aliases: cement > ### Keywords: datasets > > ### ** Examples > > lm(y ~ x1 + x2 + x3 + x4, cement) Call: lm(formula = y ~ x1 + x2 + x3 + x4, data = cement) Coefficients: (Intercept) x1 x2 x3 x4 62.4054 1.5511 0.5102 0.1019 -0.1441 > > > > cleanEx() > nameEx("confint") > ### * confint > > flush(stderr()); flush(stdout()) > > ### Name: confint-MASS > ### Title: Confidence Intervals for Model Parameters > ### Aliases: confint.glm confint.nls confint.profile.glm > ### confint.profile.nls > ### Keywords: models > > ### ** Examples > > expn1 <- deriv(y ~ b0 + b1 * 2^(-x/th), c("b0", "b1", "th"), + function(b0, b1, th, x) {}) > > wtloss.gr <- nls(Weight ~ expn1(b0, b1, th, Days), + data = wtloss, start = c(b0=90, b1=95, th=120)) > > expn2 <- deriv(~b0 + b1*((w0 - b0)/b1)^(x/d0), + c("b0","b1","d0"), function(b0, b1, d0, x, w0) {}) > > wtloss.init <- function(obj, w0) { + p <- coef(obj) + d0 <- - log((w0 - p["b0"])/p["b1"])/log(2) * p["th"] + c(p[c("b0", "b1")], d0 = as.vector(d0)) + } > > out <- NULL > w0s <- c(110, 100, 90) > for(w0 in w0s) { + fm <- nls(Weight ~ expn2(b0, b1, d0, Days, w0), + wtloss, start = wtloss.init(wtloss.gr, w0)) + out <- rbind(out, c(coef(fm)["d0"], confint(fm, "d0"))) + } Waiting for profiling to be done... Waiting for profiling to be done... Waiting for profiling to be done... > dimnames(out) <- list(paste(w0s, "kg:"), c("d0", "low", "high")) > out d0 low high 110 kg: 261.5132 256.2303 267.5009 100 kg: 349.4979 334.7293 368.0151 90 kg: 507.0941 457.2637 594.8745 > > ldose <- rep(0:5, 2) > numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16) > sex <- factor(rep(c("M", "F"), c(6, 6))) > SF <- cbind(numdead, numalive = 20 - numdead) > budworm.lg0 <- glm(SF ~ sex + ldose - 1, family = binomial) > confint(budworm.lg0) Waiting for profiling to be done... 2.5 % 97.5 % sexF -4.4581438 -2.613610 sexM -3.1728745 -1.655117 ldose 0.8228708 1.339058 > confint(budworm.lg0, "ldose") Waiting for profiling to be done... 
2.5 % 97.5 % 0.8228708 1.3390581 > > > > cleanEx() > nameEx("contr.sdif") > ### * contr.sdif > > flush(stderr()); flush(stdout()) > > ### Name: contr.sdif > ### Title: Successive Differences Contrast Coding > ### Aliases: contr.sdif > ### Keywords: models > > ### ** Examples > > (A <- contr.sdif(6)) 2-1 3-2 4-3 5-4 6-5 1 -0.8333333 -0.6666667 -0.5 -0.3333333 -0.1666667 2 0.1666667 -0.6666667 -0.5 -0.3333333 -0.1666667 3 0.1666667 0.3333333 -0.5 -0.3333333 -0.1666667 4 0.1666667 0.3333333 0.5 -0.3333333 -0.1666667 5 0.1666667 0.3333333 0.5 0.6666667 -0.1666667 6 0.1666667 0.3333333 0.5 0.6666667 0.8333333 > zapsmall(ginv(A)) [,1] [,2] [,3] [,4] [,5] [,6] [1,] -1 1 0 0 0 0 [2,] 0 -1 1 0 0 0 [3,] 0 0 -1 1 0 0 [4,] 0 0 0 -1 1 0 [5,] 0 0 0 0 -1 1 > > > > cleanEx() > nameEx("corresp") > ### * corresp > > flush(stderr()); flush(stdout()) > > ### Name: corresp > ### Title: Simple Correspondence Analysis > ### Aliases: corresp corresp.xtabs corresp.data.frame corresp.default > ### corresp.factor corresp.formula corresp.matrix > ### Keywords: category multivariate > > ### ** Examples > > (ct <- corresp(~ Age + Eth, data = quine)) First canonical correlation(s): 0.05317534 Age scores: F0 F1 F2 F3 -0.3344445 1.4246090 -1.0320002 -0.4612728 Eth scores: A N -1.0563816 0.9466276 > plot(ct) > > corresp(caith) First canonical correlation(s): 0.4463684 Row scores: blue light medium dark -0.89679252 -0.98731818 0.07530627 1.57434710 Column scores: fair red medium dark black -1.21871379 -0.52257500 -0.09414671 1.31888486 2.45176017 > biplot(corresp(caith, nf = 2)) > > > > cleanEx() > nameEx("cov.rob") > ### * cov.rob > > flush(stderr()); flush(stdout()) > > ### Name: cov.rob > ### Title: Resistant Estimation of Multivariate Location and Scatter > ### Aliases: cov.rob cov.mve cov.mcd > ### Keywords: robust multivariate > > ### ** Examples > > set.seed(123) > cov.rob(stackloss) $center Air.Flow Water.Temp Acid.Conc. 
stack.loss 56.3750 20.0000 85.4375 13.0625 $cov Air.Flow Water.Temp Acid.Conc. stack.loss Air.Flow 23.050000 6.666667 16.625000 19.308333 Water.Temp 6.666667 5.733333 5.333333 7.733333 Acid.Conc. 16.625000 5.333333 34.395833 13.837500 stack.loss 19.308333 7.733333 13.837500 18.462500 $msg [1] "20 singular samples of size 5 out of 2500" $crit [1] 19.89056 $best [1] 5 6 7 8 9 10 11 12 15 16 18 19 20 $n.obs [1] 21 > cov.rob(stack.x, method = "mcd", nsamp = "exact") $center Air.Flow Water.Temp Acid.Conc. 56.70588 20.23529 85.52941 $cov Air.Flow Water.Temp Acid.Conc. Air.Flow 23.470588 7.573529 16.102941 Water.Temp 7.573529 6.316176 5.367647 Acid.Conc. 16.102941 5.367647 32.389706 $msg [1] "266 singular samples of size 4 out of 5985" $crit [1] 5.472581 $best [1] 4 5 6 7 8 9 10 11 12 13 14 20 $n.obs [1] 21 > > > > cleanEx() > nameEx("cov.trob") > ### * cov.trob > > flush(stderr()); flush(stdout()) > > ### Name: cov.trob > ### Title: Covariance Estimation for Multivariate t Distribution > ### Aliases: cov.trob > ### Keywords: multivariate > > ### ** Examples > > cov.trob(stackloss) $cov Air.Flow Water.Temp Acid.Conc. stack.loss Air.Flow 60.47035 17.027203 18.554452 62.28032 Water.Temp 17.02720 8.085857 5.604132 20.50469 Acid.Conc. 18.55445 5.604132 24.404633 16.91085 stack.loss 62.28032 20.504687 16.910855 72.80743 $center Air.Flow Water.Temp Acid.Conc. 
stack.loss 58.96905 20.79263 86.05588 16.09028 $n.obs [1] 21 $call cov.trob(x = stackloss) $iter [1] 5 > > > > cleanEx() > nameEx("denumerate") > ### * denumerate > > flush(stderr()); flush(stdout()) > > ### Name: denumerate > ### Title: Transform an Allowable Formula for 'loglm' into one for 'terms' > ### Aliases: denumerate denumerate.formula > ### Keywords: models > > ### ** Examples > > denumerate(~(1+2+3)^3 + a/b) ~(.v1 + .v2 + .v3)^3 + a/b > ## which gives ~ (.v1 + .v2 + .v3)^3 + a/b > > > > cleanEx() > nameEx("dose.p") > ### * dose.p > > flush(stderr()); flush(stdout()) > > ### Name: dose.p > ### Title: Predict Doses for Binomial Assay model > ### Aliases: dose.p print.glm.dose > ### Keywords: regression models > > ### ** Examples > > ldose <- rep(0:5, 2) > numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16) > sex <- factor(rep(c("M", "F"), c(6, 6))) > SF <- cbind(numdead, numalive = 20 - numdead) > budworm.lg0 <- glm(SF ~ sex + ldose - 1, family = binomial) > > dose.p(budworm.lg0, cf = c(1,3), p = 1:3/4) Dose SE p = 0.25: 2.231265 0.2499089 p = 0.50: 3.263587 0.2297539 p = 0.75: 4.295910 0.2746874 > dose.p(update(budworm.lg0, family = binomial(link=probit)), + cf = c(1,3), p = 1:3/4) Dose SE p = 0.25: 2.191229 0.2384478 p = 0.50: 3.257703 0.2240685 p = 0.75: 4.324177 0.2668745 > > > > cleanEx() > nameEx("dropterm") > ### * dropterm > > flush(stderr()); flush(stdout()) > > ### Name: dropterm > ### Title: Try All One-Term Deletions from a Model > ### Aliases: dropterm dropterm.default dropterm.glm dropterm.lm > ### Keywords: models > > ### ** Examples > > quine.hi <- aov(log(Days + 2.5) ~ .^4, quine) > quine.nxt <- update(quine.hi, . ~ . 
- Eth:Sex:Age:Lrn) > dropterm(quine.nxt, test= "F") Single term deletions Model: log(Days + 2.5) ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Age + Eth:Lrn + Sex:Age + Sex:Lrn + Age:Lrn + Eth:Sex:Age + Eth:Sex:Lrn + Eth:Age:Lrn + Sex:Age:Lrn Df Sum of Sq RSS AIC F Value Pr(F) 64.099 -68.184 Eth:Sex:Age 3 0.97387 65.073 -71.982 0.60773 0.61125 Eth:Sex:Lrn 1 1.57879 65.678 -66.631 2.95567 0.08816 . Eth:Age:Lrn 2 2.12841 66.227 -67.415 1.99230 0.14087 Sex:Age:Lrn 2 1.46623 65.565 -68.882 1.37247 0.25743 --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > quine.stp <- stepAIC(quine.nxt, + scope = list(upper = ~Eth*Sex*Age*Lrn, lower = ~1), + trace = FALSE) > dropterm(quine.stp, test = "F") Single term deletions Model: log(Days + 2.5) ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Age + Eth:Lrn + Sex:Age + Sex:Lrn + Age:Lrn + Eth:Sex:Lrn + Eth:Age:Lrn Df Sum of Sq RSS AIC F Value Pr(F) 66.600 -72.597 Sex:Age 3 10.7959 77.396 -56.663 6.7542 0.0002933 *** Eth:Sex:Lrn 1 3.0325 69.632 -68.096 5.6916 0.0185476 * Eth:Age:Lrn 2 2.0960 68.696 -72.072 1.9670 0.1441822 --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > quine.3 <- update(quine.stp, . ~ . - Eth:Age:Lrn) > dropterm(quine.3, test = "F") Single term deletions Model: log(Days + 2.5) ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Age + Eth:Lrn + Sex:Age + Sex:Lrn + Age:Lrn + Eth:Sex:Lrn Df Sum of Sq RSS AIC F Value Pr(F) 68.696 -72.072 Eth:Age 3 3.0312 71.727 -71.768 1.8679 0.1383323 Sex:Age 3 11.4272 80.123 -55.607 7.0419 0.0002037 *** Age:Lrn 2 2.8149 71.511 -70.209 2.6020 0.0780701 . Eth:Sex:Lrn 1 4.6956 73.391 -64.419 8.6809 0.0038268 ** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > quine.4 <- update(quine.3, . ~ . 
- Eth:Age) > dropterm(quine.4, test = "F") Single term deletions Model: log(Days + 2.5) ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Lrn + Sex:Age + Sex:Lrn + Age:Lrn + Eth:Sex:Lrn Df Sum of Sq RSS AIC F Value Pr(F) 71.727 -71.768 Sex:Age 3 11.5656 83.292 -55.942 6.9873 0.0002147 *** Age:Lrn 2 2.9118 74.639 -69.959 2.6387 0.0752793 . Eth:Sex:Lrn 1 6.8181 78.545 -60.511 12.3574 0.0006052 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > quine.5 <- update(quine.4, . ~ . - Age:Lrn) > dropterm(quine.5, test = "F") Single term deletions Model: log(Days + 2.5) ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Lrn + Sex:Age + Sex:Lrn + Eth:Sex:Lrn Df Sum of Sq RSS AIC F Value Pr(F) 74.639 -69.959 Sex:Age 3 9.9002 84.539 -57.774 5.8362 0.0008944 *** Eth:Sex:Lrn 1 6.2988 80.937 -60.130 11.1396 0.0010982 ** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > house.glm0 <- glm(Freq ~ Infl*Type*Cont + Sat, family=poisson, + data = housing) > house.glm1 <- update(house.glm0, . ~ . + Sat*(Infl+Type+Cont)) > dropterm(house.glm1, test = "Chisq") Single term deletions Model: Freq ~ Infl + Type + Cont + Sat + Infl:Type + Infl:Cont + Type:Cont + Infl:Sat + Type:Sat + Cont:Sat + Infl:Type:Cont Df Deviance AIC LRT Pr(Chi) 38.662 455.63 Infl:Sat 4 147.780 556.75 109.117 < 2.2e-16 *** Type:Sat 6 100.889 505.86 62.227 1.586e-11 *** Cont:Sat 2 54.722 467.69 16.060 0.0003256 *** Infl:Type:Cont 6 43.952 448.92 5.290 0.5072454 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > > > cleanEx() > nameEx("eagles") > ### * eagles > > flush(stderr()); flush(stdout()) > > ### Name: eagles > ### Title: Foraging Ecology of Bald Eagles > ### Aliases: eagles > ### Keywords: datasets > > ### ** Examples > > eagles.glm <- glm(cbind(y, n - y) ~ P*A + V, data = eagles, + family = binomial) > dropterm(eagles.glm) Single term deletions Model: cbind(y, n - y) ~ P * A + V Df Deviance AIC 0.333 23.073 V 1 53.737 74.478 P:A 1 6.956 27.696 > prof <- profile(eagles.glm) > plot(prof) > pairs(prof) > > > > cleanEx() > nameEx("epil") > ### * epil > > flush(stderr()); flush(stdout()) > > ### Name: epil > ### Title: Seizure Counts for Epileptics > ### Aliases: epil > ### Keywords: datasets > > ### ** Examples > > summary(glm(y ~ lbase*trt + lage + V4, family = poisson, + data = epil), cor = FALSE) Call: glm(formula = y ~ lbase * trt + lage + V4, family = poisson, data = epil) Deviance Residuals: Min 1Q Median 3Q Max -5.0915 -1.4126 -0.2739 0.7580 10.7711 Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) 1.89791 0.04260 44.552 < 2e-16 *** lbase 0.94862 0.04360 21.759 < 2e-16 *** trtprogabide -0.34588 0.06100 -5.670 1.42e-08 *** lage 0.88760 0.11650 7.619 2.56e-14 *** V4 -0.15977 0.05458 -2.927 0.00342 ** lbase:trtprogabide 0.56154 0.06352 8.841 < 2e-16 *** --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for poisson family taken to be 1) Null deviance: 2517.83 on 235 degrees of freedom Residual deviance: 869.07 on 230 degrees of freedom AIC: 1647 Number of Fisher Scoring iterations: 5 > epil2 <- epil[epil$period == 1, ] > epil2["period"] <- rep(0, 59); epil2["y"] <- epil2["base"] > epil["time"] <- 1; epil2["time"] <- 4 > epil2 <- rbind(epil, epil2) > epil2$pred <- unclass(epil2$trt) * (epil2$period > 0) > epil2$subject <- factor(epil2$subject) > epil3 <- aggregate(epil2, list(epil2$subject, epil2$period > 0), + function(x) if(is.numeric(x)) sum(x) else x[1]) > epil3$pred <- factor(epil3$pred, + labels = c("base", "placebo", "drug")) > > contrasts(epil3$pred) <- structure(contr.sdif(3), + dimnames = list(NULL, c("placebo-base", "drug-placebo"))) > summary(glm(y ~ pred + factor(subject) + offset(log(time)), + family = poisson, data = epil3), cor = FALSE) Call: glm(formula = y ~ pred + factor(subject) + offset(log(time)), family = poisson, data = epil3) Deviance Residuals: Min 1Q Median 3Q Max -5.2928 -0.7350 0.0000 0.6997 4.7145 Coefficients: Estimate Std. 
Error z value Pr(>|z|) (Intercept) 1.122e+00 2.008e-01 5.590 2.28e-08 *** predplacebo-base 1.087e-01 4.691e-02 2.318 0.020474 * preddrug-placebo -1.016e-01 6.507e-02 -1.561 0.118431 factor(subject)2 -2.300e-15 2.828e-01 0.000 1.000000 factor(subject)3 -3.857e-01 3.144e-01 -1.227 0.219894 factor(subject)4 -1.744e-01 2.960e-01 -0.589 0.555847 factor(subject)5 1.577e+00 2.197e-01 7.178 7.08e-13 *** factor(subject)6 6.729e-01 2.458e-01 2.738 0.006182 ** factor(subject)7 -4.082e-02 2.858e-01 -0.143 0.886411 factor(subject)8 1.758e+00 2.166e-01 8.117 4.77e-16 *** factor(subject)9 5.878e-01 2.494e-01 2.356 0.018454 * factor(subject)10 5.423e-01 2.515e-01 2.156 0.031060 * factor(subject)11 1.552e+00 2.202e-01 7.048 1.81e-12 *** factor(subject)12 9.243e-01 2.364e-01 3.910 9.22e-05 *** factor(subject)13 3.075e-01 2.635e-01 1.167 0.243171 factor(subject)14 1.212e+00 2.278e-01 5.320 1.04e-07 *** factor(subject)15 1.765e+00 2.164e-01 8.153 3.54e-16 *** factor(subject)16 9.708e-01 2.348e-01 4.134 3.57e-05 *** factor(subject)17 -4.082e-02 2.858e-01 -0.143 0.886411 factor(subject)18 2.236e+00 2.104e-01 10.629 < 2e-16 *** factor(subject)19 2.776e-01 2.651e-01 1.047 0.295060 factor(subject)20 3.646e-01 2.603e-01 1.401 0.161324 factor(subject)21 3.922e-02 2.801e-01 0.140 0.888645 factor(subject)22 -8.338e-02 2.889e-01 -0.289 0.772894 factor(subject)23 1.823e-01 2.708e-01 0.673 0.500777 factor(subject)24 8.416e-01 2.393e-01 3.517 0.000436 *** factor(subject)25 2.069e+00 2.123e-01 9.750 < 2e-16 *** factor(subject)26 -5.108e-01 3.266e-01 -1.564 0.117799 factor(subject)27 -2.231e-01 3.000e-01 -0.744 0.456990 factor(subject)28 1.386e+00 2.236e-01 6.200 5.66e-10 *** factor(subject)29 1.604e+00 2.227e-01 7.203 5.90e-13 *** factor(subject)30 1.023e+00 2.372e-01 4.313 1.61e-05 *** factor(subject)31 9.149e-02 2.821e-01 0.324 0.745700 factor(subject)32 -3.111e-02 2.909e-01 -0.107 0.914822 factor(subject)33 4.710e-01 2.597e-01 1.814 0.069736 . 
factor(subject)34 3.887e-01 2.640e-01 1.473 0.140879 factor(subject)35 1.487e+00 2.250e-01 6.609 3.87e-11 *** factor(subject)36 3.598e-01 2.656e-01 1.355 0.175551 factor(subject)37 -1.221e-01 2.979e-01 -0.410 0.681943 factor(subject)38 1.344e+00 2.283e-01 5.889 3.90e-09 *** factor(subject)39 1.082e+00 2.354e-01 4.596 4.30e-06 *** factor(subject)40 -7.687e-01 3.634e-01 -2.116 0.034384 * factor(subject)41 1.656e-01 2.772e-01 0.597 0.550234 factor(subject)42 5.227e-02 2.848e-01 0.184 0.854388 factor(subject)43 1.543e+00 2.239e-01 6.891 5.54e-12 *** factor(subject)44 9.605e-01 2.393e-01 4.014 5.96e-05 *** factor(subject)45 1.177e+00 2.326e-01 5.061 4.18e-07 *** factor(subject)46 -5.275e-01 3.355e-01 -1.572 0.115840 factor(subject)47 1.053e+00 2.363e-01 4.456 8.35e-06 *** factor(subject)48 -5.275e-01 3.355e-01 -1.572 0.115840 factor(subject)49 2.949e+00 2.082e-01 14.168 < 2e-16 *** factor(subject)50 3.887e-01 2.640e-01 1.473 0.140879 factor(subject)51 1.038e+00 2.367e-01 4.385 1.16e-05 *** factor(subject)52 5.711e-01 2.548e-01 2.241 0.025023 * factor(subject)53 1.670e+00 2.215e-01 7.538 4.76e-14 *** factor(subject)54 4.443e-01 2.611e-01 1.702 0.088759 . factor(subject)55 2.674e-01 2.709e-01 0.987 0.323618 factor(subject)56 1.124e+00 2.341e-01 4.800 1.59e-06 *** factor(subject)57 2.674e-01 2.709e-01 0.987 0.323618 factor(subject)58 -6.017e-01 3.436e-01 -1.751 0.079911 . factor(subject)59 -7.556e-02 2.942e-01 -0.257 0.797331 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for poisson family taken to be 1) Null deviance: 3180.82 on 117 degrees of freedom Residual deviance: 303.16 on 57 degrees of freedom AIC: 1003.5 Number of Fisher Scoring iterations: 5 > > summary(glmmPQL(y ~ lbase*trt + lage + V4, + random = ~ 1 | subject, + family = poisson, data = epil)) iteration 1 iteration 2 iteration 3 iteration 4 iteration 5 Linear mixed-effects model fit by maximum likelihood Data: epil AIC BIC logLik NA NA NA Random effects: Formula: ~1 | subject (Intercept) Residual StdDev: 0.4442704 1.400807 Variance function: Structure: fixed weights Formula: ~invwt Fixed effects: y ~ lbase * trt + lage + V4 Value Std.Error DF t-value p-value (Intercept) 1.8696677 0.1055620 176 17.711554 0.0000 lbase 0.8818228 0.1292834 54 6.820849 0.0000 trtprogabide -0.3095253 0.1490438 54 -2.076740 0.0426 lage 0.5335460 0.3463119 54 1.540652 0.1292 V4 -0.1597696 0.0774521 176 -2.062819 0.0406 lbase:trtprogabide 0.3415425 0.2033325 54 1.679725 0.0988 Correlation: (Intr) lbase trtprg lage V4 lbase -0.126 trtprogabide -0.691 0.089 lage -0.103 -0.038 0.088 V4 -0.162 0.000 0.000 0.000 lbase:trtprogabide 0.055 -0.645 -0.184 0.267 0.000 Standardized Within-Group Residuals: Min Q1 Med Q3 Max -2.13240534 -0.63871136 -0.08486339 0.41960195 4.97872138 Number of Observations: 236 Number of Groups: 59 > summary(glmmPQL(y ~ pred, random = ~1 | subject, + family = poisson, data = epil3)) iteration 1 iteration 2 iteration 3 iteration 4 iteration 5 iteration 6 iteration 7 iteration 8 Linear mixed-effects model fit by maximum likelihood Data: epil3 AIC BIC logLik NA NA NA Random effects: Formula: ~1 | subject (Intercept) Residual StdDev: 0.7257895 2.16629 Variance function: Structure: fixed weights Formula: ~invwt Fixed effects: y ~ pred Value Std.Error DF t-value p-value (Intercept) 3.213631 0.10569117 58 30.405865 0.0000 predplacebo-base 0.110855 0.09989089 57 1.109763 0.2718 preddrug-placebo -0.105613 
0.13480483 57 -0.783450 0.4366 Correlation: (Intr) prdpl- predplacebo-base 0.081 preddrug-placebo -0.010 -0.700 Standardized Within-Group Residuals: Min Q1 Med Q3 Max -2.0446864 -0.4765135 -0.1975651 0.3145761 2.6532834 Number of Observations: 118 Number of Groups: 59 > > > > cleanEx() > nameEx("farms") > ### * farms > > flush(stderr()); flush(stdout()) > > ### Name: farms > ### Title: Ecological Factors in Farm Management > ### Aliases: farms > ### Keywords: datasets > > ### ** Examples > > farms.mca <- mca(farms, abbrev = TRUE) # Use levels as names > eqscplot(farms.mca$cs, type = "n") > text(farms.mca$rs, cex = 0.7) > text(farms.mca$cs, labels = dimnames(farms.mca$cs)[[1]], cex = 0.7) > > > > cleanEx() > nameEx("fitdistr") > ### * fitdistr > > flush(stderr()); flush(stdout()) > > ### Name: fitdistr > ### Title: Maximum-likelihood Fitting of Univariate Distributions > ### Aliases: fitdistr > ### Keywords: distribution htest > > ### ** Examples > > ## avoid spurious accuracy > op <- options(digits = 3) > set.seed(123) > x <- rgamma(100, shape = 5, rate = 0.1) > fitdistr(x, "gamma") shape rate 6.4870 0.1365 (0.8946) (0.0196) > ## now do this directly with more control. > fitdistr(x, dgamma, list(shape = 1, rate = 0.1), lower = 0.001) shape rate 6.4869 0.1365 (0.8944) (0.0196) > > set.seed(123) > x2 <- rt(250, df = 9) > fitdistr(x2, "t", df = 9) m s -0.0107 1.0441 ( 0.0722) ( 0.0543) > ## allow df to vary: not a very good idea! > fitdistr(x2, "t") Warning in dt((x - m)/s, df, log = TRUE) : NaNs produced m s df -0.00965 1.00617 6.62729 ( 0.07147) ( 0.07707) ( 2.71033) > ## now do fixed-df fit directly with more control. 
> mydt <- function(x, m, s, df) dt((x-m)/s, df)/s > fitdistr(x2, mydt, list(m = 0, s = 1), df = 9, lower = c(-Inf, 0)) m s -0.0107 1.0441 ( 0.0722) ( 0.0543) > > set.seed(123) > x3 <- rweibull(100, shape = 4, scale = 100) > fitdistr(x3, "weibull") shape scale 4.080 99.984 ( 0.313) ( 2.582) > > set.seed(123) > x4 <- rnegbin(500, mu = 5, theta = 4) > fitdistr(x4, "Negative Binomial") size mu 4.216 4.945 (0.504) (0.147) > options(op) > > > > cleanEx() > nameEx("fractions") > ### * fractions > > flush(stderr()); flush(stdout()) > > ### Name: fractions > ### Title: Rational Approximation > ### Aliases: fractions Math.fractions Ops.fractions Summary.fractions > ### [.fractions [<-.fractions as.character.fractions as.fractions > ### is.fractions print.fractions t.fractions > ### Keywords: math > > ### ** Examples > > X <- matrix(runif(25), 5, 5) > zapsmall(solve(X, X/5)) # print near-zeroes as zero [,1] [,2] [,3] [,4] [,5] [1,] 0.2 0.0 0.0 0.0 0.0 [2,] 0.0 0.2 0.0 0.0 0.0 [3,] 0.0 0.0 0.2 0.0 0.0 [4,] 0.0 0.0 0.0 0.2 0.0 [5,] 0.0 0.0 0.0 0.0 0.2 > fractions(solve(X, X/5)) [,1] [,2] [,3] [,4] [,5] [1,] 1/5 0 0 0 0 [2,] 0 1/5 0 0 0 [3,] 0 0 1/5 0 0 [4,] 0 0 0 1/5 0 [5,] 0 0 0 0 1/5 > fractions(solve(X, X/5)) + 1 [,1] [,2] [,3] [,4] [,5] [1,] 6/5 1 1 1 1 [2,] 1 6/5 1 1 1 [3,] 1 1 6/5 1 1 [4,] 1 1 1 6/5 1 [5,] 1 1 1 1 6/5 > > > > cleanEx() > nameEx("galaxies") > ### * galaxies > > flush(stderr()); flush(stdout()) > > ### Name: galaxies > ### Title: Velocities for 82 Galaxies > ### Aliases: galaxies > ### Keywords: datasets > > ### ** Examples > > gal <- galaxies/1000 > c(width.SJ(gal, method = "dpi"), width.SJ(gal)) [1] 3.256151 2.566423 > plot(x = c(0, 40), y = c(0, 0.3), type = "n", bty = "l", + xlab = "velocity of galaxy (1000km/s)", ylab = "density") > rug(gal) > lines(density(gal, width = 3.25, n = 200), lty = 1) > lines(density(gal, width = 2.56, n = 200), lty = 3) > > > > cleanEx() > nameEx("gamma.shape.glm") > ### * gamma.shape.glm > > flush(stderr()); flush(stdout()) 
> > ### Name: gamma.shape > ### Title: Estimate the Shape Parameter of the Gamma Distribution in a GLM > ### Fit > ### Aliases: gamma.shape gamma.shape.glm print.gamma.shape > ### Keywords: models > > ### ** Examples > > clotting <- data.frame( + u = c(5,10,15,20,30,40,60,80,100), + lot1 = c(118,58,42,35,27,25,21,19,18), + lot2 = c(69,35,26,21,18,16,13,12,12)) > clot1 <- glm(lot1 ~ log(u), data = clotting, family = Gamma) > gamma.shape(clot1) Alpha: 538.1315 SE: 253.5991 > > gm <- glm(Days + 0.1 ~ Age*Eth*Sex*Lrn, + quasi(link=log, variance="mu^2"), quine, + start = c(3, rep(0,31))) > gamma.shape(gm, verbose = TRUE) Initial estimate: 1.060344 Iter. 1 Alpha: 1.238408 Iter. 2 Alpha: 1.276997 Iter. 3 Alpha: 1.278343 Iter. 4 Alpha: 1.278345 Alpha: 1.2783449 SE: 0.1345175 > summary(gm, dispersion = gamma.dispersion(gm)) # better summary Call: glm(formula = Days + 0.1 ~ Age * Eth * Sex * Lrn, family = quasi(link = log, variance = "mu^2"), data = quine, start = c(3, rep(0, 31))) Deviance Residuals: Min 1Q Median 3Q Max -3.0385 -0.7164 -0.1532 0.3863 1.3087 Coefficients: (4 not defined because of singularities) Estimate Std. Error z value Pr(>|z|) (Intercept) 3.06105 0.44223 6.922 4.46e-12 *** AgeF1 -0.61870 0.59331 -1.043 0.297041 AgeF2 -2.31911 0.98885 -2.345 0.019014 * AgeF3 -0.37623 0.53149 -0.708 0.479020 EthN -0.13789 0.62540 -0.220 0.825496 SexM -0.48844 0.59331 -0.823 0.410369 LrnSL -1.92965 0.98885 -1.951 0.051009 . 
AgeF1:EthN 0.10249 0.82338 0.124 0.900942 AgeF2:EthN -0.50874 1.39845 -0.364 0.716017 AgeF3:EthN 0.06314 0.74584 0.085 0.932534 AgeF1:SexM 0.40695 0.94847 0.429 0.667884 AgeF2:SexM 3.06173 1.11626 2.743 0.006091 ** AgeF3:SexM 1.10841 0.74208 1.494 0.135267 EthN:SexM -0.74217 0.82338 -0.901 0.367394 AgeF1:LrnSL 2.60967 1.10114 2.370 0.017789 * AgeF2:LrnSL 4.78434 1.36304 3.510 0.000448 *** AgeF3:LrnSL NA NA NA NA EthN:LrnSL 2.22936 1.39845 1.594 0.110899 SexM:LrnSL 1.56531 1.18112 1.325 0.185077 AgeF1:EthN:SexM -0.30235 1.32176 -0.229 0.819065 AgeF2:EthN:SexM 0.29742 1.57035 0.189 0.849780 AgeF3:EthN:SexM 0.82215 1.03277 0.796 0.425995 AgeF1:EthN:LrnSL -3.50803 1.54655 -2.268 0.023311 * AgeF2:EthN:LrnSL -3.33529 1.92481 -1.733 0.083133 . AgeF3:EthN:LrnSL NA NA NA NA AgeF1:SexM:LrnSL -2.39791 1.51050 -1.587 0.112400 AgeF2:SexM:LrnSL -4.12161 1.60698 -2.565 0.010323 * AgeF3:SexM:LrnSL NA NA NA NA EthN:SexM:LrnSL -0.15305 1.66253 -0.092 0.926653 AgeF1:EthN:SexM:LrnSL 2.13480 2.08685 1.023 0.306317 AgeF2:EthN:SexM:LrnSL 2.11886 2.27882 0.930 0.352473 AgeF3:EthN:SexM:LrnSL NA NA NA NA --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for quasi family taken to be 0.7822615) Null deviance: 190.40 on 145 degrees of freedom Residual deviance: 128.36 on 118 degrees of freedom AIC: NA Number of Fisher Scoring iterations: 7 > > > > cleanEx() > nameEx("gehan") > ### * gehan > > flush(stderr()); flush(stdout()) > > ### Name: gehan > ### Title: Remission Times of Leukaemia Patients > ### Aliases: gehan > ### Keywords: datasets > > ### ** Examples > > library(survival) > gehan.surv <- survfit(Surv(time, cens) ~ treat, data = gehan, + conf.type = "log-log") > summary(gehan.surv) Call: survfit(formula = Surv(time, cens) ~ treat, data = gehan, conf.type = "log-log") treat=6-MP time n.risk n.event survival std.err lower 95% CI upper 95% CI 6 21 3 0.857 0.0764 0.620 0.952 7 17 1 0.807 0.0869 0.563 0.923 10 15 1 0.753 0.0963 0.503 0.889 13 12 1 0.690 0.1068 0.432 0.849 16 11 1 0.627 0.1141 0.368 0.805 22 7 1 0.538 0.1282 0.268 0.747 23 6 1 0.448 0.1346 0.188 0.680 treat=control time n.risk n.event survival std.err lower 95% CI upper 95% CI 1 21 2 0.9048 0.0641 0.67005 0.975 2 19 2 0.8095 0.0857 0.56891 0.924 3 17 1 0.7619 0.0929 0.51939 0.893 4 16 2 0.6667 0.1029 0.42535 0.825 5 14 2 0.5714 0.1080 0.33798 0.749 8 12 4 0.3810 0.1060 0.18307 0.578 11 8 2 0.2857 0.0986 0.11656 0.482 12 6 2 0.1905 0.0857 0.05948 0.377 15 4 1 0.1429 0.0764 0.03566 0.321 17 3 1 0.0952 0.0641 0.01626 0.261 22 2 1 0.0476 0.0465 0.00332 0.197 23 1 1 0.0000 NaN NA NA > survreg(Surv(time, cens) ~ factor(pair) + treat, gehan, dist = "exponential") Call: survreg(formula = Surv(time, cens) ~ factor(pair) + treat, data = gehan, dist = "exponential") Coefficients: (Intercept) factor(pair)2 factor(pair)3 factor(pair)4 factor(pair)5 2.0702861 2.1476909 1.8329493 1.7718527 1.4682566 factor(pair)6 factor(pair)7 factor(pair)8 factor(pair)9 factor(pair)10 1.8954775 0.5583010 2.5187140 2.2970513 2.4862208 factor(pair)11 factor(pair)12 factor(pair)13 factor(pair)14 
factor(pair)15 1.0524472 1.8270477 1.6772567 1.7778672 2.0859913 factor(pair)16 factor(pair)17 factor(pair)18 factor(pair)19 factor(pair)20 3.0634288 0.7996252 1.5855018 1.4083884 0.4023946 factor(pair)21 treatcontrol 1.9698390 -1.7671562 Scale fixed at 1 Loglik(model)= -101.6 Loglik(intercept only)= -116.8 Chisq= 30.27 on 21 degrees of freedom, p= 0.087 n= 42 > summary(survreg(Surv(time, cens) ~ treat, gehan, dist = "exponential")) Call: survreg(formula = Surv(time, cens) ~ treat, data = gehan, dist = "exponential") Value Std. Error z p (Intercept) 3.69 0.333 11.06 2.00e-28 treatcontrol -1.53 0.398 -3.83 1.27e-04 Scale fixed at 1 Exponential distribution Loglik(model)= -108.5 Loglik(intercept only)= -116.8 Chisq= 16.49 on 1 degrees of freedom, p= 4.9e-05 Number of Newton-Raphson Iterations: 4 n= 42 > summary(survreg(Surv(time, cens) ~ treat, gehan)) Call: survreg(formula = Surv(time, cens) ~ treat, data = gehan) Value Std. Error z p (Intercept) 3.516 0.252 13.96 2.61e-44 treatcontrol -1.267 0.311 -4.08 4.51e-05 Log(scale) -0.312 0.147 -2.12 3.43e-02 Scale= 0.732 Weibull distribution Loglik(model)= -106.6 Loglik(intercept only)= -116.4 Chisq= 19.65 on 1 degrees of freedom, p= 9.3e-06 Number of Newton-Raphson Iterations: 5 n= 42 > gehan.cox <- coxph(Surv(time, cens) ~ treat, gehan) > summary(gehan.cox) Call: coxph(formula = Surv(time, cens) ~ treat, data = gehan) n= 42, number of events= 30 coef exp(coef) se(coef) z Pr(>|z|) treatcontrol 1.5721 4.8169 0.4124 3.812 0.000138 *** --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 exp(coef) exp(-coef) lower .95 upper .95 treatcontrol 4.817 0.2076 2.147 10.81 Concordance= 0.69 (se = 0.053 ) Rsquare= 0.322 (max possible= 0.988 ) Likelihood ratio test= 16.35 on 1 df, p=5.261e-05 Wald test = 14.53 on 1 df, p=0.0001378 Score (logrank) test = 17.25 on 1 df, p=3.283e-05 > > > > cleanEx() detaching ‘package:survival’ > nameEx("ginv") > ### * ginv > > flush(stderr()); flush(stdout()) > > ### Name: ginv > ### Title: Generalized Inverse of a Matrix > ### Aliases: ginv > ### Keywords: algebra > > ### ** Examples > > ## Not run: > ##D # The function is currently defined as > ##D function(X, tol = sqrt(.Machine$double.eps)) > ##D { > ##D ## Generalized Inverse of a Matrix > ##D dnx <- dimnames(X) > ##D if(is.null(dnx)) dnx <- vector("list", 2) > ##D s <- svd(X) > ##D nz <- s$d > tol * s$d[1] > ##D structure( > ##D if(any(nz)) s$v[, nz] %*% (t(s$u[, nz])/s$d[nz]) else X, > ##D dimnames = dnx[2:1]) > ##D } > ## End(Not run) > > > cleanEx() > nameEx("glm.convert") > ### * glm.convert > > flush(stderr()); flush(stdout()) > > ### Name: glm.convert > ### Title: Change a Negative Binomial fit to a GLM fit > ### Aliases: glm.convert > ### Keywords: regression models > > ### ** Examples > > quine.nb1 <- glm.nb(Days ~ Sex/(Age + Eth*Lrn), data = quine) > quine.nbA <- glm.convert(quine.nb1) > quine.nbB <- update(quine.nb1, . ~ . + Sex:Age:Lrn) > anova(quine.nbA, quine.nbB) Analysis of Deviance Table Model 1: Days ~ Sex/(Age + Eth * Lrn) Model 2: Days ~ Sex + Sex:Age + Sex:Eth + Sex:Lrn + Sex:Eth:Lrn + Sex:Age:Lrn Resid. Df Resid. 
Dev Df Deviance 1 132 167.56 2 128 166.83 4 0.723 > > > > cleanEx() > nameEx("glm.nb") > ### * glm.nb > > flush(stderr()); flush(stdout()) > > ### Name: glm.nb > ### Title: Fit a Negative Binomial Generalized Linear Model > ### Aliases: glm.nb family.negbin logLik.negbin > ### Keywords: regression models > > ### ** Examples > > quine.nb1 <- glm.nb(Days ~ Sex/(Age + Eth*Lrn), data = quine) > quine.nb2 <- update(quine.nb1, . ~ . + Sex:Age:Lrn) > quine.nb3 <- update(quine.nb2, Days ~ .^4) > anova(quine.nb1, quine.nb2, quine.nb3) Likelihood ratio tests of Negative Binomial Models Response: Days Model 1 Sex/(Age + Eth * Lrn) 2 Sex + Sex:Age + Sex:Eth + Sex:Lrn + Sex:Eth:Lrn + Sex:Age:Lrn 3 Sex + Sex:Age + Sex:Eth + Sex:Lrn + Sex:Eth:Lrn + Sex:Age:Lrn + Sex:Age:Eth + Sex:Age:Eth:Lrn theta Resid. df 2 x log-lik. Test df LR stat. Pr(Chi) 1 1.597991 132 -1063.025 2 1.686899 128 -1055.398 1 vs 2 4 7.627279 0.10622602 3 1.928360 118 -1039.324 2 vs 3 10 16.073723 0.09754136 > ## Don't show: > ## PR#1695 > y <- c(7, 5, 4, 7, 5, 2, 11, 5, 5, 4, 2, 3, 4, 3, 5, 9, 6, 7, 10, 6, 12, + 6, 3, 5, 3, 9, 13, 0, 6, 1, 2, 0, 1, 0, 0, 4, 5, 1, 5, 3, 3, 4) > > lag1 <- c(0, 7, 5, 4, 7, 5, 2, 11, 5, 5, 4, 2, 3, 4, 3, 5, 9, 6, 7, 10, + 6, 12, 6, 3, 5, 3, 9, 13, 0, 6, 1, 2, 0, 1, 0, 0, 4, 5, 1, 5, 3, 3) > > lag2 <- c(0, 0, 7, 5, 4, 7, 5, 2, 11, 5, 5, 4, 2, 3, 4, 3, 5, 9, 6, 7, + 10, 6, 12, 6, 3, 5, 3, 9, 13, 0, 6, 1, 2, 0, 1, 0, 0, 4, 5, 1, 5, 3) > > lag3 <- c(0, 0, 0, 7, 5, 4, 7, 5, 2, 11, 5, 5, 4, 2, 3, 4, 3, 5, 9, 6, + 7, 10, 6, 12, 6, 3, 5, 3, 9, 13, 0, 6, 1, 2, 0, 1, 0, 0, 4, 5, 1, 5) > > (fit <- glm(y ~ lag1+lag2+lag3, family=poisson(link=identity), + start=c(2, 0.1, 0.1, 0.1))) Call: glm(formula = y ~ lag1 + lag2 + lag3, family = poisson(link = identity), start = c(2, 0.1, 0.1, 0.1)) Coefficients: (Intercept) lag1 lag2 lag3 2.6609 0.1573 0.1424 0.1458 Degrees of Freedom: 41 Total (i.e. 
Null); 38 Residual Null Deviance: 100.2 Residual Deviance: 90.34 AIC: 225.6 > try(glm.nb(y ~ lag1+lag2+lag3, link=identity)) Warning in log(y/mu) : NaNs produced Error : no valid set of coefficients has been found: please supply starting values > glm.nb(y ~ lag1+lag2+lag3, link=identity, start=c(2, 0.1, 0.1, 0.1)) Call: glm.nb(formula = y ~ lag1 + lag2 + lag3, start = c(2, 0.1, 0.1, 0.1), link = identity, init.theta = 4.406504429) Coefficients: (Intercept) lag1 lag2 lag3 2.6298 0.1774 0.1407 0.1346 Degrees of Freedom: 41 Total (i.e. Null); 38 Residual Null Deviance: 55.07 Residual Deviance: 50.09 AIC: 215.9 > glm.nb(y ~ lag1+lag2+lag3, link=identity, start=coef(fit)) Call: glm.nb(formula = y ~ lag1 + lag2 + lag3, start = coef(fit), link = identity, init.theta = 4.406504429) Coefficients: (Intercept) lag1 lag2 lag3 2.6298 0.1774 0.1407 0.1346 Degrees of Freedom: 41 Total (i.e. Null); 38 Residual Null Deviance: 55.07 Residual Deviance: 50.09 AIC: 215.9 > glm.nb(y ~ lag1+lag2+lag3, link=identity, etastart=rep(5, 42)) Call: glm.nb(formula = y ~ lag1 + lag2 + lag3, etastart = rep(5, 42), link = identity, init.theta = 4.406504429) Coefficients: (Intercept) lag1 lag2 lag3 2.6298 0.1774 0.1407 0.1346 Degrees of Freedom: 41 Total (i.e. 
Null); 38 Residual Null Deviance: 55.07 Residual Deviance: 50.09 AIC: 215.9 > ## End(Don't show) > > > cleanEx() > nameEx("glmmPQL") > ### * glmmPQL > > flush(stderr()); flush(stdout()) > > ### Name: glmmPQL > ### Title: Fit Generalized Linear Mixed Models via PQL > ### Aliases: glmmPQL > ### Keywords: models > > ### ** Examples > > library(nlme) # will be loaded automatically if omitted > summary(glmmPQL(y ~ trt + I(week > 2), random = ~ 1 | ID, + family = binomial, data = bacteria)) iteration 1 iteration 2 iteration 3 iteration 4 iteration 5 iteration 6 Linear mixed-effects model fit by maximum likelihood Data: bacteria AIC BIC logLik NA NA NA Random effects: Formula: ~1 | ID (Intercept) Residual StdDev: 1.410637 0.7800511 Variance function: Structure: fixed weights Formula: ~invwt Fixed effects: y ~ trt + I(week > 2) Value Std.Error DF t-value p-value (Intercept) 3.412014 0.5185033 169 6.580506 0.0000 trtdrug -1.247355 0.6440635 47 -1.936696 0.0588 trtdrug+ -0.754327 0.6453978 47 -1.168779 0.2484 I(week > 2)TRUE -1.607257 0.3583379 169 -4.485311 0.0000 Correlation: (Intr) trtdrg trtdr+ trtdrug -0.598 trtdrug+ -0.571 0.460 I(week > 2)TRUE -0.537 0.047 -0.001 Standardized Within-Group Residuals: Min Q1 Med Q3 Max -5.1985361 0.1572336 0.3513075 0.4949482 1.7448845 Number of Observations: 220 Number of Groups: 50 > ## Don't show: > # an example of offset > summary(glmmPQL(y ~ trt + week, random = ~ 1 | ID, + family = binomial, data = bacteria)) iteration 1 iteration 2 iteration 3 iteration 4 iteration 5 iteration 6 Linear mixed-effects model fit by maximum likelihood Data: bacteria AIC BIC logLik NA NA NA Random effects: Formula: ~1 | ID (Intercept) Residual StdDev: 1.325243 0.7903088 Variance function: Structure: fixed weights Formula: ~invwt Fixed effects: y ~ trt + week Value Std.Error DF t-value p-value (Intercept) 3.0302276 0.4791396 169 6.324310 0.0000 trtdrug -1.2176812 0.6160113 47 -1.976719 0.0540 trtdrug+ -0.7886376 0.6193895 47 -1.273250 0.2092 week 
-0.1446463 0.0392343 169 -3.686730 0.0003 Correlation: (Intr) trtdrg trtdr+ trtdrug -0.622 trtdrug+ -0.609 0.464 week -0.481 0.050 0.030 Standardized Within-Group Residuals: Min Q1 Med Q3 Max -4.2868074 0.2039043 0.3140333 0.5440835 1.9754065 Number of Observations: 220 Number of Groups: 50 > summary(glmmPQL(y ~ trt + week + offset(week), random = ~ 1 | ID, + family = binomial, data = bacteria)) iteration 1 iteration 2 iteration 3 iteration 4 iteration 5 iteration 6 Linear mixed-effects model fit by maximum likelihood Data: bacteria AIC BIC logLik NA NA NA Random effects: Formula: ~1 | ID (Intercept) Residual StdDev: 1.325243 0.7903088 Variance function: Structure: fixed weights Formula: ~invwt Fixed effects: y ~ trt + week + offset(week) Value Std.Error DF t-value p-value (Intercept) 3.0302276 0.4791396 169 6.324310 0.0000 trtdrug -1.2176812 0.6160113 47 -1.976719 0.0540 trtdrug+ -0.7886376 0.6193895 47 -1.273250 0.2092 week -1.1446463 0.0392343 169 -29.174622 0.0000 Correlation: (Intr) trtdrg trtdr+ trtdrug -0.622 trtdrug+ -0.609 0.464 week -0.481 0.050 0.030 Standardized Within-Group Residuals: Min Q1 Med Q3 Max -4.2868074 0.2039043 0.3140333 0.5440835 1.9754065 Number of Observations: 220 Number of Groups: 50 > ## End(Don't show) > > > cleanEx() detaching ‘package:nlme’ > nameEx("housing") > ### * housing > > flush(stderr()); flush(stdout()) > > ### Name: housing > ### Title: Frequency Table from a Copenhagen Housing Conditions Survey > ### Aliases: housing > ### Keywords: datasets > > ### ** Examples > > options(contrasts = c("contr.treatment", "contr.poly")) > > # Surrogate Poisson models > house.glm0 <- glm(Freq ~ Infl*Type*Cont + Sat, family = poisson, + data = housing) > summary(house.glm0, cor = FALSE) Call: glm(formula = Freq ~ Infl * Type * Cont + Sat, family = poisson, data = housing) Deviance Residuals: Min 1Q Median 3Q Max -4.5551 -1.0612 -0.0593 0.6483 4.1478 Coefficients: Estimate Std. 
Error z value Pr(>|z|) (Intercept) 3.136e+00 1.196e-01 26.225 < 2e-16 *** InflMedium 2.733e-01 1.586e-01 1.723 0.084868 . InflHigh -2.054e-01 1.784e-01 -1.152 0.249511 TypeApartment 3.666e-01 1.555e-01 2.357 0.018403 * TypeAtrium -7.828e-01 2.134e-01 -3.668 0.000244 *** TypeTerrace -8.145e-01 2.157e-01 -3.775 0.000160 *** ContHigh -1.490e-15 1.690e-01 0.000 1.000000 Sat.L 1.159e-01 4.038e-02 2.871 0.004094 ** Sat.Q 2.629e-01 4.515e-02 5.824 5.76e-09 *** InflMedium:TypeApartment -1.177e-01 2.086e-01 -0.564 0.572571 InflHigh:TypeApartment 1.753e-01 2.279e-01 0.769 0.441783 InflMedium:TypeAtrium -4.068e-01 3.035e-01 -1.340 0.180118 InflHigh:TypeAtrium -1.692e-01 3.294e-01 -0.514 0.607433 InflMedium:TypeTerrace 6.292e-03 2.860e-01 0.022 0.982450 InflHigh:TypeTerrace -9.305e-02 3.280e-01 -0.284 0.776633 InflMedium:ContHigh -1.398e-01 2.279e-01 -0.613 0.539715 InflHigh:ContHigh -6.091e-01 2.800e-01 -2.176 0.029585 * TypeApartment:ContHigh 5.029e-01 2.109e-01 2.385 0.017083 * TypeAtrium:ContHigh 6.774e-01 2.751e-01 2.462 0.013811 * TypeTerrace:ContHigh 1.099e+00 2.675e-01 4.106 4.02e-05 *** InflMedium:TypeApartment:ContHigh 5.359e-02 2.862e-01 0.187 0.851450 InflHigh:TypeApartment:ContHigh 1.462e-01 3.380e-01 0.432 0.665390 InflMedium:TypeAtrium:ContHigh 1.555e-01 3.907e-01 0.398 0.690597 InflHigh:TypeAtrium:ContHigh 4.782e-01 4.441e-01 1.077 0.281619 InflMedium:TypeTerrace:ContHigh -4.980e-01 3.671e-01 -1.357 0.174827 InflHigh:TypeTerrace:ContHigh -4.470e-01 4.545e-01 -0.984 0.325326 --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for poisson family taken to be 1) Null deviance: 833.66 on 71 degrees of freedom Residual deviance: 217.46 on 46 degrees of freedom AIC: 610.43 Number of Fisher Scoring iterations: 5 > > addterm(house.glm0, ~. 
+ Sat:(Infl+Type+Cont), test = "Chisq") Single term additions Model: Freq ~ Infl * Type * Cont + Sat Df Deviance AIC LRT Pr(Chi) 217.46 610.43 Infl:Sat 4 111.08 512.05 106.371 < 2.2e-16 *** Type:Sat 6 156.79 561.76 60.669 3.292e-11 *** Cont:Sat 2 212.33 609.30 5.126 0.07708 . --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > house.glm1 <- update(house.glm0, . ~ . + Sat*(Infl+Type+Cont)) > summary(house.glm1, cor = FALSE) Call: glm(formula = Freq ~ Infl + Type + Cont + Sat + Infl:Type + Infl:Cont + Type:Cont + Infl:Sat + Type:Sat + Cont:Sat + Infl:Type:Cont, family = poisson, data = housing) Deviance Residuals: Min 1Q Median 3Q Max -1.6022 -0.5282 -0.0641 0.5757 1.9322 Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) 3.135074 0.120112 26.101 < 2e-16 *** InflMedium 0.248327 0.159979 1.552 0.120602 InflHigh -0.412645 0.184947 -2.231 0.025671 * TypeApartment 0.292524 0.157477 1.858 0.063231 . TypeAtrium -0.792847 0.214413 -3.698 0.000218 *** TypeTerrace -1.018074 0.221263 -4.601 4.20e-06 *** ContHigh -0.001407 0.169711 -0.008 0.993385 Sat.L -0.098106 0.112592 -0.871 0.383570 Sat.Q 0.285657 0.122283 2.336 0.019489 * InflMedium:TypeApartment -0.017882 0.210496 -0.085 0.932302 InflHigh:TypeApartment 0.386869 0.233297 1.658 0.097263 . 
InflMedium:TypeAtrium -0.360311 0.304979 -1.181 0.237432 InflHigh:TypeAtrium -0.036788 0.334793 -0.110 0.912503 InflMedium:TypeTerrace 0.185154 0.288892 0.641 0.521580 InflHigh:TypeTerrace 0.310749 0.334815 0.928 0.353345 InflMedium:ContHigh -0.200060 0.228748 -0.875 0.381799 InflHigh:ContHigh -0.725790 0.282352 -2.571 0.010155 * TypeApartment:ContHigh 0.569691 0.212152 2.685 0.007247 ** TypeAtrium:ContHigh 0.702115 0.276056 2.543 0.010979 * TypeTerrace:ContHigh 1.215930 0.269968 4.504 6.67e-06 *** InflMedium:Sat.L 0.519627 0.096830 5.366 8.03e-08 *** InflHigh:Sat.L 1.140302 0.118180 9.649 < 2e-16 *** InflMedium:Sat.Q -0.064474 0.102666 -0.628 0.530004 InflHigh:Sat.Q 0.115436 0.127798 0.903 0.366380 TypeApartment:Sat.L -0.520170 0.109793 -4.738 2.16e-06 *** TypeAtrium:Sat.L -0.288484 0.149551 -1.929 0.053730 . TypeTerrace:Sat.L -0.998666 0.141527 -7.056 1.71e-12 *** TypeApartment:Sat.Q 0.055418 0.118515 0.468 0.640068 TypeAtrium:Sat.Q -0.273820 0.149713 -1.829 0.067405 . TypeTerrace:Sat.Q -0.032328 0.149251 -0.217 0.828520 ContHigh:Sat.L 0.340703 0.087778 3.881 0.000104 *** ContHigh:Sat.Q -0.097929 0.094068 -1.041 0.297851 InflMedium:TypeApartment:ContHigh 0.046900 0.286212 0.164 0.869837 InflHigh:TypeApartment:ContHigh 0.126229 0.338208 0.373 0.708979 InflMedium:TypeAtrium:ContHigh 0.157239 0.390719 0.402 0.687364 InflHigh:TypeAtrium:ContHigh 0.478611 0.444244 1.077 0.281320 InflMedium:TypeTerrace:ContHigh -0.500162 0.367135 -1.362 0.173091 InflHigh:TypeTerrace:ContHigh -0.463099 0.454713 -1.018 0.308467 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for poisson family taken to be 1) Null deviance: 833.657 on 71 degrees of freedom Residual deviance: 38.662 on 34 degrees of freedom AIC: 455.63 Number of Fisher Scoring iterations: 4 > > 1 - pchisq(deviance(house.glm1), house.glm1$df.residual) [1] 0.2671363 > > dropterm(house.glm1, test = "Chisq") Single term deletions Model: Freq ~ Infl + Type + Cont + Sat + Infl:Type + Infl:Cont + Type:Cont + Infl:Sat + Type:Sat + Cont:Sat + Infl:Type:Cont Df Deviance AIC LRT Pr(Chi) 38.662 455.63 Infl:Sat 4 147.780 556.75 109.117 < 2.2e-16 *** Type:Sat 6 100.889 505.86 62.227 1.586e-11 *** Cont:Sat 2 54.722 467.69 16.060 0.0003256 *** Infl:Type:Cont 6 43.952 448.92 5.290 0.5072454 --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > addterm(house.glm1, ~. + Sat:(Infl+Type+Cont)^2, test = "Chisq") Single term additions Model: Freq ~ Infl + Type + Cont + Sat + Infl:Type + Infl:Cont + Type:Cont + Infl:Sat + Type:Sat + Cont:Sat + Infl:Type:Cont Df Deviance AIC LRT Pr(Chi) 38.662 455.63 Infl:Type:Sat 12 16.107 457.08 22.5550 0.03175 * Infl:Cont:Sat 4 37.472 462.44 1.1901 0.87973 Type:Cont:Sat 6 28.256 457.23 10.4064 0.10855 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > hnames <- lapply(housing[, -5], levels) # omit Freq > newData <- expand.grid(hnames) > newData$Sat <- ordered(newData$Sat) > house.pm <- predict(house.glm1, newData, + type = "response") # poisson means > house.pm <- matrix(house.pm, ncol = 3, byrow = TRUE, + dimnames = list(NULL, hnames[[1]])) > house.pr <- house.pm/drop(house.pm %*% rep(1, 3)) > cbind(expand.grid(hnames[-1]), round(house.pr, 2)) Infl Type Cont Low Medium High 1 Low Tower Low 0.40 0.26 0.34 2 Medium Tower Low 0.26 0.27 0.47 3 High Tower Low 0.15 0.19 0.66 4 Low Apartment Low 0.54 0.23 0.23 5 Medium Apartment Low 0.39 0.26 0.34 6 High Apartment Low 0.26 0.21 0.53 7 Low Atrium Low 0.43 0.32 0.25 8 Medium Atrium Low 0.30 0.35 0.36 9 High Atrium Low 0.19 0.27 0.54 10 Low Terrace Low 0.65 0.22 0.14 11 Medium Terrace Low 0.51 0.27 0.22 12 High Terrace Low 0.37 0.24 0.39 13 Low Tower High 0.30 0.28 0.42 14 Medium Tower High 0.18 0.27 0.54 15 High Tower High 0.10 0.19 0.71 16 Low Apartment High 0.44 0.27 0.30 17 Medium Apartment High 0.30 0.28 0.42 18 High Apartment High 0.18 0.21 0.61 19 Low Atrium High 0.33 0.36 0.31 20 Medium Atrium High 0.22 0.36 0.42 21 High Atrium High 0.13 0.27 0.60 22 Low Terrace High 0.55 0.27 0.19 23 Medium Terrace High 0.40 0.31 0.29 24 High Terrace High 0.27 0.26 0.47 > > # Iterative proportional scaling > loglm(Freq ~ Infl*Type*Cont + Sat*(Infl+Type+Cont), data = housing) Call: loglm(formula = Freq ~ Infl * Type * Cont + Sat * (Infl + Type + Cont), data = housing) Statistics: X^2 df P(> X^2) Likelihood Ratio 38.66222 34 0.2671359 Pearson 38.90831 34 0.2582333 > > > # multinomial model > library(nnet) > (house.mult<- multinom(Sat ~ Infl + Type + Cont, weights = Freq, + data = housing)) # weights: 24 (14 variable) initial value 1846.767257 iter 10 value 1747.045232 final value 1735.041933 converged Call: multinom(formula = Sat ~ Infl + Type + Cont, data = housing, weights = Freq) Coefficients: (Intercept) InflMedium 
InflHigh TypeApartment TypeAtrium TypeTerrace Medium -0.4192316 0.4464003 0.6649367 -0.4356851 0.1313663 -0.6665728 High -0.1387453 0.7348626 1.6126294 -0.7356261 -0.4079808 -1.4123333 ContHigh Medium 0.3608513 High 0.4818236 Residual Deviance: 3470.084 AIC: 3498.084 > house.mult2 <- multinom(Sat ~ Infl*Type*Cont, weights = Freq, + data = housing) # weights: 75 (48 variable) initial value 1846.767257 iter 10 value 1734.465581 iter 20 value 1717.220153 iter 30 value 1715.760679 iter 40 value 1715.713306 final value 1715.710836 converged > anova(house.mult, house.mult2) Likelihood ratio tests of Multinomial Models Response: Sat Model Resid. df Resid. Dev Test Df LR stat. Pr(Chi) 1 Infl + Type + Cont 130 3470.084 2 Infl * Type * Cont 96 3431.422 1 vs 2 34 38.66219 0.2671367 > > house.pm <- predict(house.mult, expand.grid(hnames[-1]), type = "probs") > cbind(expand.grid(hnames[-1]), round(house.pm, 2)) Infl Type Cont Low Medium High 1 Low Tower Low 0.40 0.26 0.34 2 Medium Tower Low 0.26 0.27 0.47 3 High Tower Low 0.15 0.19 0.66 4 Low Apartment Low 0.54 0.23 0.23 5 Medium Apartment Low 0.39 0.26 0.34 6 High Apartment Low 0.26 0.21 0.53 7 Low Atrium Low 0.43 0.32 0.25 8 Medium Atrium Low 0.30 0.35 0.36 9 High Atrium Low 0.19 0.27 0.54 10 Low Terrace Low 0.65 0.22 0.14 11 Medium Terrace Low 0.51 0.27 0.22 12 High Terrace Low 0.37 0.24 0.39 13 Low Tower High 0.30 0.28 0.42 14 Medium Tower High 0.18 0.27 0.54 15 High Tower High 0.10 0.19 0.71 16 Low Apartment High 0.44 0.27 0.30 17 Medium Apartment High 0.30 0.28 0.42 18 High Apartment High 0.18 0.21 0.61 19 Low Atrium High 0.33 0.36 0.31 20 Medium Atrium High 0.22 0.36 0.42 21 High Atrium High 0.13 0.27 0.60 22 Low Terrace High 0.55 0.27 0.19 23 Medium Terrace High 0.40 0.31 0.29 24 High Terrace High 0.27 0.26 0.47 > > # proportional odds model > house.cpr <- apply(house.pr, 1, cumsum) > logit <- function(x) log(x/(1-x)) > house.ld <- logit(house.cpr[2, ]) - logit(house.cpr[1, ]) > (ratio <- sort(drop(house.ld))) [1] 
0.9357341 0.9854433 1.0573182 1.0680491 1.0772649 1.0803574 1.0824895 [8] 1.0998759 1.1199975 1.1554228 1.1768138 1.1866427 1.2091541 1.2435026 [15] 1.2724096 1.2750171 1.2849903 1.3062598 1.3123988 1.3904715 1.4540087 [22] 1.4947753 1.4967585 1.6068789 > mean(ratio) [1] 1.223835 > > (house.plr <- polr(Sat ~ Infl + Type + Cont, + data = housing, weights = Freq)) Call: polr(formula = Sat ~ Infl + Type + Cont, data = housing, weights = Freq) Coefficients: InflMedium InflHigh TypeApartment TypeAtrium TypeTerrace 0.5663937 1.2888191 -0.5723501 -0.3661866 -1.0910149 ContHigh 0.3602841 Intercepts: Low|Medium Medium|High -0.4961353 0.6907083 Residual Deviance: 3479.149 AIC: 3495.149 > > house.pr1 <- predict(house.plr, expand.grid(hnames[-1]), type = "probs") > cbind(expand.grid(hnames[-1]), round(house.pr1, 2)) Infl Type Cont Low Medium High 1 Low Tower Low 0.38 0.29 0.33 2 Medium Tower Low 0.26 0.27 0.47 3 High Tower Low 0.14 0.21 0.65 4 Low Apartment Low 0.52 0.26 0.22 5 Medium Apartment Low 0.38 0.29 0.33 6 High Apartment Low 0.23 0.26 0.51 7 Low Atrium Low 0.47 0.27 0.26 8 Medium Atrium Low 0.33 0.29 0.38 9 High Atrium Low 0.19 0.25 0.56 10 Low Terrace Low 0.64 0.21 0.14 11 Medium Terrace Low 0.51 0.26 0.23 12 High Terrace Low 0.33 0.29 0.38 13 Low Tower High 0.30 0.28 0.42 14 Medium Tower High 0.19 0.25 0.56 15 High Tower High 0.10 0.17 0.72 16 Low Apartment High 0.43 0.28 0.29 17 Medium Apartment High 0.30 0.28 0.42 18 High Apartment High 0.17 0.23 0.60 19 Low Atrium High 0.38 0.29 0.33 20 Medium Atrium High 0.26 0.27 0.47 21 High Atrium High 0.14 0.21 0.64 22 Low Terrace High 0.56 0.25 0.19 23 Medium Terrace High 0.42 0.28 0.30 24 High Terrace High 0.26 0.27 0.47 > > Fr <- matrix(housing$Freq, ncol = 3, byrow = TRUE) > 2*sum(Fr*log(house.pr/house.pr1)) [1] 9.065433 > > house.plr2 <- stepAIC(house.plr, ~.^2) Start: AIC=3495.15 Sat ~ Infl + Type + Cont Df AIC + Infl:Type 6 3484.6 + Type:Cont 3 3492.5 3495.1 + Infl:Cont 2 3498.9 - Cont 1 3507.5 - Type 3 3545.1 - Infl 
2 3599.4 Step: AIC=3484.64 Sat ~ Infl + Type + Cont + Infl:Type Df AIC + Type:Cont 3 3482.7 3484.6 + Infl:Cont 2 3488.5 - Infl:Type 6 3495.1 - Cont 1 3497.8 Step: AIC=3482.69 Sat ~ Infl + Type + Cont + Infl:Type + Type:Cont Df AIC 3482.7 - Type:Cont 3 3484.6 + Infl:Cont 2 3486.6 - Infl:Type 6 3492.5 > house.plr2$anova Stepwise Model Path Analysis of Deviance Table Initial Model: Sat ~ Infl + Type + Cont Final Model: Sat ~ Infl + Type + Cont + Infl:Type + Type:Cont Step Df Deviance Resid. Df Resid. Dev AIC 1 1673 3479.149 3495.149 2 + Infl:Type 6 22.509347 1667 3456.640 3484.640 3 + Type:Cont 3 7.945029 1664 3448.695 3482.695 > > > > base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly")) > cleanEx() detaching ‘package:nnet’ > nameEx("huber") > ### * huber > > flush(stderr()); flush(stdout()) > > ### Name: huber > ### Title: Huber M-estimator of Location with MAD Scale > ### Aliases: huber > ### Keywords: robust > > ### ** Examples > > huber(chem) $mu [1] 3.206724 $s [1] 0.526323 > > > > cleanEx() > nameEx("hubers") > ### * hubers > > flush(stderr()); flush(stdout()) > > ### Name: hubers > ### Title: Huber Proposal 2 Robust Estimator of Location and/or Scale > ### Aliases: hubers > ### Keywords: robust > > ### ** Examples > > hubers(chem) $mu [1] 3.205498 $s [1] 0.673652 > hubers(chem, mu=3.68) $mu [1] 3.68 $s [1] 0.9409628 > > > > cleanEx() > nameEx("immer") > ### * immer > > flush(stderr()); flush(stdout()) > > ### Name: immer > ### Title: Yields from a Barley Field Trial > ### Aliases: immer > ### Keywords: datasets > > ### ** Examples > > immer.aov <- aov(cbind(Y1,Y2) ~ Loc + Var, data = immer) > summary(immer.aov) Response Y1 : Df Sum Sq Mean Sq F value Pr(>F) Loc 5 17829.8 3566.0 21.8923 1.751e-07 *** Var 4 2756.6 689.2 4.2309 0.01214 * Residuals 20 3257.7 162.9 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Response Y2 : Df Sum Sq Mean Sq F value Pr(>F) Loc 5 10285.0 2056.99 10.3901 5.049e-05 *** Var 4 2845.2 711.29 3.5928 0.02306 * Residuals 20 3959.5 197.98 --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > > immer.aov <- aov((Y1+Y2)/2 ~ Var + Loc, data = immer) > summary(immer.aov) Df Sum Sq Mean Sq F value Pr(>F) Var 4 2655 663.7 5.989 0.00245 ** Loc 5 10610 2122.1 19.148 5.21e-07 *** Residuals 20 2217 110.8 --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > model.tables(immer.aov, type = "means", se = TRUE, cterms = "Var") Tables of means Grand mean 101.09 Var Var M P S T V 94.39 102.54 91.13 118.20 99.18 Standard errors for differences of means Var 6.078 replic. 6 > > > > cleanEx() > nameEx("isoMDS") > ### * isoMDS > > flush(stderr()); flush(stdout()) > > ### Name: isoMDS > ### Title: Kruskal's Non-metric Multidimensional Scaling > ### Aliases: isoMDS Shepard > ### Keywords: multivariate > > ### ** Examples > > swiss.x <- as.matrix(swiss[, -1]) > swiss.dist <- dist(swiss.x) > swiss.mds <- isoMDS(swiss.dist) initial value 2.979731 iter 5 value 2.431486 iter 10 value 2.343353 final value 2.338839 converged > plot(swiss.mds$points, type = "n") > text(swiss.mds$points, labels = as.character(1:nrow(swiss.x))) > swiss.sh <- Shepard(swiss.dist, swiss.mds$points) > plot(swiss.sh, pch = ".") > lines(swiss.sh$x, swiss.sh$yf, type = "S") > > > > cleanEx() > nameEx("kde2d") > ### * kde2d > > flush(stderr()); flush(stdout()) > > ### Name: kde2d > ### Title: Two-Dimensional Kernel Density Estimation > ### Aliases: kde2d > ### Keywords: dplot > > ### ** Examples > > attach(geyser) > plot(duration, waiting, xlim = c(0.5,6), ylim = c(40,100)) > f1 <- kde2d(duration, waiting, n = 50, lims = c(0.5, 6, 40, 100)) > image(f1, zlim = c(0, 0.05)) > f2 <- kde2d(duration, waiting, n = 50, lims = c(0.5, 6, 40, 100), + h = c(width.SJ(duration), width.SJ(waiting)) ) > image(f2, zlim = c(0, 0.05)) > 
persp(f2, phi = 30, theta = 20, d = 5) > > plot(duration[-272], duration[-1], xlim = c(0.5, 6), + ylim = c(1, 6),xlab = "previous duration", ylab = "duration") > f1 <- kde2d(duration[-272], duration[-1], + h = rep(1.5, 2), n = 50, lims = c(0.5, 6, 0.5, 6)) > contour(f1, xlab = "previous duration", + ylab = "duration", levels = c(0.05, 0.1, 0.2, 0.4) ) > f1 <- kde2d(duration[-272], duration[-1], + h = rep(0.6, 2), n = 50, lims = c(0.5, 6, 0.5, 6)) > contour(f1, xlab = "previous duration", + ylab = "duration", levels = c(0.05, 0.1, 0.2, 0.4) ) > f1 <- kde2d(duration[-272], duration[-1], + h = rep(0.4, 2), n = 50, lims = c(0.5, 6, 0.5, 6)) > contour(f1, xlab = "previous duration", + ylab = "duration", levels = c(0.05, 0.1, 0.2, 0.4) ) > detach("geyser") > > > > cleanEx() > nameEx("lda") > ### * lda > > flush(stderr()); flush(stdout()) > > ### Name: lda > ### Title: Linear Discriminant Analysis > ### Aliases: lda lda.default lda.data.frame lda.formula lda.matrix > ### model.frame.lda print.lda coef.lda > ### Keywords: multivariate > > ### ** Examples > > Iris <- data.frame(rbind(iris3[,,1], iris3[,,2], iris3[,,3]), + Sp = rep(c("s","c","v"), rep(50,3))) > train <- sample(1:150, 75) > table(Iris$Sp[train]) c s v 28 26 21 > ## your answer may differ > ## c s v > ## 22 23 30 > z <- lda(Sp ~ ., Iris, prior = c(1,1,1)/3, subset = train) > predict(z, Iris[-train, ])$class [1] s s s s s s s s s s s s s s s s s s s s s s s s c c c c c c c c c c c c c c [39] c c c c c c c c v v v v v v v v v v v v v v v v v v v v v v v v v v v v v Levels: c s v > ## [1] s s s s s s s s s s s s s s s s s s s s s s s s s s s c c c > ## [31] c c c c c c c v c c c c v c c c c c c c c c c c c v v v v v > ## [61] v v v v v v v v v v v v v v v > (z1 <- update(z, . ~ . - Petal.W.)) Call: lda(Sp ~ Sepal.L. + Sepal.W. + Petal.L., data = Iris, prior = c(1, 1, 1)/3, subset = train) Prior probabilities of groups: c s v 0.3333333 0.3333333 0.3333333 Group means: Sepal.L. Sepal.W. Petal.L. 
c 5.878571 2.785714 4.278571 s 4.950000 3.338462 1.457692 v 6.504762 2.957143 5.500000 Coefficients of linear discriminants: LD1 LD2 Sepal.L. 1.2555097 -0.2975141 Sepal.W. 0.9085695 2.8401071 Petal.L. -3.5502485 0.4380715 Proportion of trace: LD1 LD2 0.9957 0.0043 > > > > cleanEx() > nameEx("leuk") > ### * leuk > > flush(stderr()); flush(stdout()) > > ### Name: leuk > ### Title: Survival Times and White Blood Counts for Leukaemia Patients > ### Aliases: leuk > ### Keywords: datasets > > ### ** Examples > > library(survival) > plot(survfit(Surv(time) ~ ag, data = leuk), lty = 2:3, col = 2:3) > > # now Cox models > leuk.cox <- coxph(Surv(time) ~ ag + log(wbc), leuk) > summary(leuk.cox) Call: coxph(formula = Surv(time) ~ ag + log(wbc), data = leuk) n= 33, number of events= 33 coef exp(coef) se(coef) z Pr(>|z|) agpresent -1.0691 0.3433 0.4293 -2.490 0.01276 * log(wbc) 0.3677 1.4444 0.1360 2.703 0.00687 ** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 exp(coef) exp(-coef) lower .95 upper .95 agpresent 0.3433 2.9126 0.148 0.7964 log(wbc) 1.4444 0.6923 1.106 1.8857 Concordance= 0.726 (se = 0.065 ) Rsquare= 0.377 (max possible= 0.994 ) Likelihood ratio test= 15.64 on 2 df, p=0.0004014 Wald test = 15.06 on 2 df, p=0.0005365 Score (logrank) test = 16.49 on 2 df, p=0.0002629 > > > > cleanEx() detaching ‘package:survival’ > nameEx("lm.ridge") > ### * lm.ridge > > flush(stderr()); flush(stdout()) > > ### Name: lm.ridge > ### Title: Ridge Regression > ### Aliases: lm.ridge plot.ridgelm print.ridgelm select select.ridgelm > ### Keywords: models > > ### ** Examples > > longley # not the same as the S-PLUS dataset GNP.deflator GNP Unemployed Armed.Forces Population Year Employed 1947 83.0 234.289 235.6 159.0 107.608 1947 60.323 1948 88.5 259.426 232.5 145.6 108.632 1948 61.122 1949 88.2 258.054 368.2 161.6 109.773 1949 60.171 1950 89.5 284.599 335.1 165.0 110.929 1950 61.187 1951 96.2 328.975 209.9 309.9 112.075 1951 63.221 1952 98.1 346.999 193.2 359.4 113.270 
1952 63.639 1953 99.0 365.385 187.0 354.7 115.094 1953 64.989 1954 100.0 363.112 357.8 335.0 116.219 1954 63.761 1955 101.2 397.469 290.4 304.8 117.388 1955 66.019 1956 104.6 419.180 282.2 285.7 118.734 1956 67.857 1957 108.4 442.769 293.6 279.8 120.445 1957 68.169 1958 110.8 444.546 468.1 263.7 121.950 1958 66.513 1959 112.6 482.704 381.3 255.2 123.366 1959 68.655 1960 114.2 502.601 393.1 251.4 125.368 1960 69.564 1961 115.7 518.173 480.6 257.2 127.852 1961 69.331 1962 116.9 554.894 400.7 282.7 130.081 1962 70.551 > names(longley)[1] <- "y" > lm.ridge(y ~ ., longley) GNP Unemployed Armed.Forces Population 2946.85636017 0.26352725 0.03648291 0.01116105 -1.73702984 Year Employed -1.41879853 0.23128785 > plot(lm.ridge(y ~ ., longley, + lambda = seq(0,0.1,0.001))) > select(lm.ridge(y ~ ., longley, + lambda = seq(0,0.1,0.0001))) modified HKB estimator is 0.006836982 modified L-W estimator is 0.05267247 smallest value of GCV at 0.0057 > > > > cleanEx() > nameEx("loglm") > ### * loglm > > flush(stderr()); flush(stdout()) > > ### Name: loglm > ### Title: Fit Log-Linear Models by Iterative Proportional Scaling > ### Aliases: loglm > ### Keywords: category models > > ### ** Examples > > # The data frames Cars93, minn38 and quine are available > # in the MASS package. > > # Case 1: frequencies specified as an array. > sapply(minn38, function(x) length(levels(x))) hs phs fol sex f 3 4 7 2 0 > ## hs phs fol sex f > ## 3 4 7 2 0 > ##minn38a <- array(0, c(3,4,7,2), lapply(minn38[, -5], levels)) > ##minn38a[data.matrix(minn38[,-5])] <- minn38$f > > ## or more simply > minn38a <- xtabs(f ~ ., minn38) > > fm <- loglm(~ 1 + 2 + 3 + 4, minn38a) # numerals as names. > deviance(fm) [1] 3711.914 > ## [1] 3711.9 > fm1 <- update(fm, .~.^2) > fm2 <- update(fm, .~.^3, print = TRUE) 5 iterations: deviation 0.07512432 > ## 5 iterations: deviation 0.075 > anova(fm, fm1, fm2) LR tests for hierarchical log-linear models Model 1: ~1 + 2 + 3 + 4 Model 2: . 
~ `1` + `2` + `3` + `4` + `1`:`2` + `1`:`3` + `1`:`4` + `2`:`3` + `2`:`4` + `3`:`4` Model 3: . ~ `1` + `2` + `3` + `4` + `1`:`2` + `1`:`3` + `1`:`4` + `2`:`3` + `2`:`4` + `3`:`4` + `1`:`2`:`3` + `1`:`2`:`4` + `1`:`3`:`4` + `2`:`3`:`4` Deviance df Delta(Dev) Delta(df) P(> Delta(Dev) Model 1 3711.91367 155 Model 2 220.04285 108 3491.87082 47 0.00000 Model 3 47.74492 36 172.29794 72 0.00000 Saturated 0.00000 0 47.74492 36 0.09114 > > # Case 1. An array generated with xtabs. > > loglm(~ Type + Origin, xtabs(~ Type + Origin, Cars93)) Call: loglm(formula = ~Type + Origin, data = xtabs(~Type + Origin, Cars93)) Statistics: X^2 df P(> X^2) Likelihood Ratio 18.36179 5 0.00252554 Pearson 14.07985 5 0.01511005 > > # Case 2. Frequencies given as a vector in a data frame > names(quine) [1] "Eth" "Sex" "Age" "Lrn" "Days" > ## [1] "Eth" "Sex" "Age" "Lrn" "Days" > fm <- loglm(Days ~ .^2, quine) > gm <- glm(Days ~ .^2, poisson, quine) # check glm. > c(deviance(fm), deviance(gm)) # deviances agree [1] 1368.669 1368.669 > ## [1] 1368.7 1368.7 > c(fm$df, gm$df) # resid df do not! [1] 127 > c(fm$df, gm$df.residual) # resid df do not! [1] 127 128 > ## [1] 127 128 > # The loglm residual degrees of freedom is wrong because of > # a non-detectable redundancy in the model matrix. 
> > > > cleanEx() > nameEx("logtrans") > ### * logtrans > > flush(stderr()); flush(stdout()) > > ### Name: logtrans > ### Title: Estimate log Transformation Parameter > ### Aliases: logtrans logtrans.formula logtrans.lm logtrans.default > ### Keywords: regression models hplot > > ### ** Examples > > logtrans(Days ~ Age*Sex*Eth*Lrn, data = quine, + alpha = seq(0.75, 6.5, len=20)) > > > > cleanEx() > nameEx("lqs") > ### * lqs > > flush(stderr()); flush(stdout()) > > ### Name: lqs > ### Title: Resistant Regression > ### Aliases: lqs lqs.formula lqs.default lmsreg ltsreg > ### Keywords: models robust > > ### ** Examples > > set.seed(123) # make reproducible > lqs(stack.loss ~ ., data = stackloss) Call: lqs.formula(formula = stack.loss ~ ., data = stackloss) Coefficients: (Intercept) Air.Flow Water.Temp Acid.Conc. -3.631e+01 7.292e-01 4.167e-01 -8.131e-17 Scale estimates 0.9149 1.0148 > lqs(stack.loss ~ ., data = stackloss, method = "S", nsamp = "exact") Call: lqs.formula(formula = stack.loss ~ ., data = stackloss, nsamp = "exact", method = "S") Coefficients: (Intercept) Air.Flow Water.Temp Acid.Conc. 
-35.37611 0.82522 0.44248 -0.07965 Scale estimates 1.912 > > > > cleanEx() > nameEx("mca") > ### * mca > > flush(stderr()); flush(stdout()) > > ### Name: mca > ### Title: Multiple Correspondence Analysis > ### Aliases: mca print.mca > ### Keywords: category multivariate > > ### ** Examples > > farms.mca <- mca(farms, abbrev=TRUE) > farms.mca Call: mca(df = farms, abbrev = TRUE) Multiple correspondence analysis of 20 cases of 4 factors Correlations 0.806 0.745 cumulative % explained 26.87 51.71 > plot(farms.mca) > > > > cleanEx() > nameEx("menarche") > ### * menarche > > flush(stderr()); flush(stdout()) > > ### Name: menarche > ### Title: Age of Menarche in Warsaw > ### Aliases: menarche > ### Keywords: datasets > > ### ** Examples > > mprob <- glm(cbind(Menarche, Total - Menarche) ~ Age, + binomial(link = probit), data = menarche) > > > > cleanEx() > nameEx("motors") > ### * motors > > flush(stderr()); flush(stdout()) > > ### Name: motors > ### Title: Accelerated Life Testing of Motorettes > ### Aliases: motors > ### Keywords: datasets > > ### ** Examples > > library(survival) > plot(survfit(Surv(time, cens) ~ factor(temp), motors), conf.int = FALSE) > # fit Weibull model > motor.wei <- survreg(Surv(time, cens) ~ temp, motors) > summary(motor.wei) Call: survreg(formula = Surv(time, cens) ~ temp, data = motors) Value Std. 
Error z p (Intercept) 16.3185 0.62296 26.2 3.03e-151 temp -0.0453 0.00319 -14.2 6.74e-46 Log(scale) -1.0956 0.21480 -5.1 3.38e-07 Scale= 0.334 Weibull distribution Loglik(model)= -147.4 Loglik(intercept only)= -169.5 Chisq= 44.32 on 1 degrees of freedom, p= 2.8e-11 Number of Newton-Raphson Iterations: 7 n= 40 > # and predict at 130C > unlist(predict(motor.wei, data.frame(temp=130), se.fit = TRUE)) fit.1 se.fit.1 33813.06 7506.36 > > motor.cox <- coxph(Surv(time, cens) ~ temp, motors) > summary(motor.cox) Call: coxph(formula = Surv(time, cens) ~ temp, data = motors) n= 40, number of events= 17 coef exp(coef) se(coef) z Pr(>|z|) temp 0.09185 1.09620 0.02736 3.358 0.000786 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 exp(coef) exp(-coef) lower .95 upper .95 temp 1.096 0.9122 1.039 1.157 Concordance= 0.84 (se = 0.076 ) Rsquare= 0.472 (max possible= 0.936 ) Likelihood ratio test= 25.56 on 1 df, p=4.299e-07 Wald test = 11.27 on 1 df, p=0.0007863 Score (logrank) test = 22.73 on 1 df, p=1.862e-06 > # predict at temperature 200 > plot(survfit(motor.cox, newdata = data.frame(temp=200), + conf.type = "log-log")) > summary( survfit(motor.cox, newdata = data.frame(temp=130)) ) Call: survfit(formula = motor.cox, newdata = data.frame(temp = 130)) time n.risk n.event survival std.err lower 95% CI upper 95% CI 408 40 4 1.000 0.000254 0.999 1 504 36 3 1.000 0.000498 0.999 1 1344 28 2 0.999 0.001910 0.995 1 1440 26 1 0.998 0.002697 0.993 1 1764 20 1 0.996 0.005325 0.986 1 2772 19 1 0.994 0.007920 0.978 1 3444 18 1 0.991 0.010673 0.971 1 3542 17 1 0.988 0.013667 0.962 1 3780 16 1 0.985 0.016976 0.952 1 4860 15 1 0.981 0.020692 0.941 1 5196 14 1 0.977 0.024941 0.929 1 > > > > cleanEx() detaching ‘package:survival’ > nameEx("muscle") > ### * muscle > > flush(stderr()); flush(stdout()) > > ### Name: muscle > ### Title: Effect of Calcium Chloride on Muscle Contraction in Rat Hearts > ### Aliases: muscle > ### Keywords: datasets > > ### ** Examples > > A <- 
model.matrix(~ Strip - 1, data=muscle) > rats.nls1 <- nls(log(Length) ~ cbind(A, rho^Conc), + data = muscle, start = c(rho=0.1), algorithm="plinear") > (B <- coef(rats.nls1)) rho .lin.StripS01 .lin.StripS02 .lin.StripS03 .lin.StripS04 0.07776401 3.08304824 3.30137838 3.44562531 2.80464434 .lin.StripS05 .lin.StripS06 .lin.StripS07 .lin.StripS08 .lin.StripS09 2.60835015 3.03357725 3.52301734 3.38711844 3.46709396 .lin.StripS10 .lin.StripS11 .lin.StripS12 .lin.StripS13 .lin.StripS14 3.81438456 3.73878664 3.51332581 3.39741115 3.47088608 .lin.StripS15 .lin.StripS16 .lin.StripS17 .lin.StripS18 .lin.StripS19 3.72895847 3.31863862 3.37938673 2.96452195 3.58468686 .lin.StripS20 .lin.StripS21 .lin22 3.39628029 3.36998872 -2.96015460 > > st <- list(alpha = B[2:22], beta = B[23], rho = B[1]) > (rats.nls2 <- nls(log(Length) ~ alpha[Strip] + beta*rho^Conc, + data = muscle, start = st)) Nonlinear regression model model: log(Length) ~ alpha[Strip] + beta * rho^Conc data: muscle alpha..lin.StripS01 alpha..lin.StripS02 alpha..lin.StripS03 alpha..lin.StripS04 3.08305 3.30138 3.44563 2.80464 alpha..lin.StripS05 alpha..lin.StripS06 alpha..lin.StripS07 alpha..lin.StripS08 2.60835 3.03358 3.52302 3.38712 alpha..lin.StripS09 alpha..lin.StripS10 alpha..lin.StripS11 alpha..lin.StripS12 3.46709 3.81438 3.73879 3.51333 alpha..lin.StripS13 alpha..lin.StripS14 alpha..lin.StripS15 alpha..lin.StripS16 3.39741 3.47089 3.72896 3.31864 alpha..lin.StripS17 alpha..lin.StripS18 alpha..lin.StripS19 alpha..lin.StripS20 3.37939 2.96452 3.58469 3.39628 alpha..lin.StripS21 beta..lin22 rho.rho 3.36999 -2.96015 0.07776 residual sum-of-squares: 1.045 Number of iterations to convergence: 0 Achieved convergence tolerance: 4.923e-06 > > Muscle <- with(muscle, { + Muscle <- expand.grid(Conc = sort(unique(Conc)), Strip = levels(Strip)) + Muscle$Yhat <- predict(rats.nls2, Muscle) + Muscle <- cbind(Muscle, logLength = rep(as.numeric(NA), 126)) + ind <- match(paste(Strip, Conc), + paste(Muscle$Strip, Muscle$Conc)) + 
Muscle$logLength[ind] <- log(Length) + Muscle}) > > lattice::xyplot(Yhat ~ Conc | Strip, Muscle, as.table = TRUE, + ylim = range(c(Muscle$Yhat, Muscle$logLength), na.rm = TRUE), + subscripts = TRUE, xlab = "Calcium Chloride concentration (mM)", + ylab = "log(Length in mm)", panel = + function(x, y, subscripts, ...) { + panel.xyplot(x, Muscle$logLength[subscripts], ...) + llines(spline(x, y)) + }) > > > > cleanEx() > nameEx("mvrnorm") > ### * mvrnorm > > flush(stderr()); flush(stdout()) > > ### Name: mvrnorm > ### Title: Simulate from a Multivariate Normal Distribution > ### Aliases: mvrnorm > ### Keywords: distribution multivariate > > ### ** Examples > > Sigma <- matrix(c(10,3,3,2),2,2) > Sigma [,1] [,2] [1,] 10 3 [2,] 3 2 > var(mvrnorm(n = 1000, rep(0, 2), Sigma)) [,1] [,2] [1,] 10.697849 3.228279 [2,] 3.228279 2.165271 > var(mvrnorm(n = 1000, rep(0, 2), Sigma, empirical = TRUE)) [,1] [,2] [1,] 10 3 [2,] 3 2 > > > > cleanEx() > nameEx("negative.binomial") > ### * negative.binomial > > flush(stderr()); flush(stdout()) > > ### Name: negative.binomial > ### Title: Family function for Negative Binomial GLMs > ### Aliases: negative.binomial > ### Keywords: regression models > > ### ** Examples > > # Fitting a Negative Binomial model to the quine data > # with theta = 2 assumed known. 
> # > glm(Days ~ .^4, family = negative.binomial(2), data = quine) Call: glm(formula = Days ~ .^4, family = negative.binomial(2), data = quine) Coefficients: (Intercept) EthN SexM 3.0564 -0.1386 -0.4914 AgeF1 AgeF2 AgeF3 -0.6227 -2.3632 -0.3784 LrnSL EthN:SexM EthN:AgeF1 -1.9577 -0.7524 0.1029 EthN:AgeF2 EthN:AgeF3 EthN:LrnSL -0.5546 0.0633 2.2588 SexM:AgeF1 SexM:AgeF2 SexM:AgeF3 0.4092 3.1098 1.1145 SexM:LrnSL AgeF1:LrnSL AgeF2:LrnSL 1.5900 2.6421 4.8585 AgeF3:LrnSL EthN:SexM:AgeF1 EthN:SexM:AgeF2 NA -0.3105 0.3469 EthN:SexM:AgeF3 EthN:SexM:LrnSL EthN:AgeF1:LrnSL 0.8329 -0.1639 -3.5493 EthN:AgeF2:LrnSL EthN:AgeF3:LrnSL SexM:AgeF1:LrnSL -3.3315 NA -2.4285 SexM:AgeF2:LrnSL SexM:AgeF3:LrnSL EthN:SexM:AgeF1:LrnSL -4.1914 NA 2.1711 EthN:SexM:AgeF2:LrnSL EthN:SexM:AgeF3:LrnSL 2.1029 NA Degrees of Freedom: 145 Total (i.e. Null); 118 Residual Null Deviance: 280.2 Residual Deviance: 172 AIC: 1095 > > > > cleanEx() > nameEx("nlschools") > ### * nlschools > > flush(stderr()); flush(stdout()) > > ### Name: nlschools > ### Title: Eighth-Grade Pupils in the Netherlands > ### Aliases: nlschools > ### Keywords: datasets > > ### ** Examples > > ## Don't show: > op <- options(digits=5) > ## End(Don't show) > nl1 <- within(nlschools, { + IQave <- tapply(IQ, class, mean)[as.character(class)] + IQ <- IQ - IQave + }) > cen <- c("IQ", "IQave", "SES") > nl1[cen] <- scale(nl1[cen], center = TRUE, scale = FALSE) > > nl.lme <- nlme::lme(lang ~ IQ*COMB + IQave + SES, + random = ~ IQ | class, data = nl1) > summary(nl.lme) Linear mixed-effects model fit by REML Data: nl1 AIC BIC logLik 15120 15178 -7550.2 Random effects: Formula: ~IQ | class Structure: General positive-definite, Log-Cholesky parametrization StdDev Corr (Intercept) 2.78707 (Intr) IQ 0.48424 -0.516 Residual 6.24839 Fixed effects: lang ~ IQ * COMB + IQave + SES Value Std.Error DF t-value p-value (Intercept) 41.370 0.35364 2151 116.985 0.0000 IQ 2.124 0.10070 2151 21.088 0.0000 COMB1 -1.672 0.58719 130 -2.847 0.0051 IQave 3.248 
0.30021 130 10.818 0.0000 SES 0.157 0.01465 2151 10.697 0.0000 IQ:COMB1 0.431 0.18594 2151 2.317 0.0206 Correlation: (Intr) IQ COMB1 IQave SES IQ -0.257 COMB1 -0.609 0.155 IQave -0.049 0.041 0.171 SES 0.010 -0.190 -0.001 -0.168 IQ:COMB1 0.139 -0.522 -0.206 -0.016 -0.003 Standardized Within-Group Residuals: Min Q1 Med Q3 Max -4.059387 -0.631084 0.065519 0.717864 2.794540 Number of Observations: 2287 Number of Groups: 133 > ## Don't show: > options(op) > ## End(Don't show) > > > > cleanEx() > nameEx("npk") > ### * npk > > flush(stderr()); flush(stdout()) > > ### Name: npk > ### Title: Classical N, P, K Factorial Experiment > ### Aliases: npk > ### Keywords: datasets > > ### ** Examples > > options(contrasts = c("contr.sum", "contr.poly")) > npk.aov <- aov(yield ~ block + N*P*K, npk) > npk.aov Call: aov(formula = yield ~ block + N * P * K, data = npk) Terms: block N P K N:P N:K P:K Sum of Squares 343.2950 189.2817 8.4017 95.2017 21.2817 33.1350 0.4817 Deg. of Freedom 5 1 1 1 1 1 1 Residuals Sum of Squares 185.2867 Deg. of Freedom 12 Residual standard error: 3.929447 1 out of 13 effects not estimable Estimated effects may be unbalanced > summary(npk.aov) Df Sum Sq Mean Sq F value Pr(>F) block 5 343.3 68.66 4.447 0.01594 * N 1 189.3 189.28 12.259 0.00437 ** P 1 8.4 8.40 0.544 0.47490 K 1 95.2 95.20 6.166 0.02880 * N:P 1 21.3 21.28 1.378 0.26317 N:K 1 33.1 33.14 2.146 0.16865 P:K 1 0.5 0.48 0.031 0.86275 Residuals 12 185.3 15.44 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > alias(npk.aov) Model : yield ~ block + N * P * K Complete : (Intercept) block1 block2 block3 block4 block5 N1 P1 K1 N1:P1 N1:K1 N1:P1:K1 0 1 -1 -1 -1 1 0 0 0 0 0 P1:K1 N1:P1:K1 0 > coef(npk.aov) (Intercept) block1 block2 block3 block4 block5 54.8750000 -0.8500000 2.5750000 5.9000000 -4.7500000 -4.3500000 N1 P1 K1 N1:P1 N1:K1 P1:K1 -2.8083333 0.5916667 1.9916667 -0.9416667 -1.1750000 0.1416667 > options(contrasts = c("contr.treatment", "contr.poly")) > npk.aov1 <- aov(yield ~ block + N + K, data = npk) > summary.lm(npk.aov1) Call: aov(formula = yield ~ block + N + K, data = npk) Residuals: Min 1Q Median 3Q Max -6.4083 -2.1438 0.2042 2.3292 7.0750 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 53.208 2.276 23.381 8.5e-14 *** block2 3.425 2.787 1.229 0.23690 block3 6.750 2.787 2.422 0.02769 * block4 -3.900 2.787 -1.399 0.18082 block5 -3.500 2.787 -1.256 0.22723 block6 2.325 2.787 0.834 0.41646 N1 5.617 1.609 3.490 0.00302 ** K1 -3.983 1.609 -2.475 0.02487 * --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Residual standard error: 3.942 on 16 degrees of freedom Multiple R-squared: 0.7163, Adjusted R-squared: 0.5922 F-statistic: 5.772 on 7 and 16 DF, p-value: 0.001805 > se.contrast(npk.aov1, list(N=="0", N=="1"), data = npk) [1] 1.609175 > model.tables(npk.aov1, type = "means", se = TRUE) Tables of means Grand mean 54.875 block block 1 2 3 4 5 6 54.03 57.45 60.77 50.12 50.52 56.35 N N 0 1 52.07 57.68 K K 0 1 56.87 52.88 Standard errors for differences of means block N K 2.787 1.609 1.609 replic. 
4 12 12 > > > > base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly")) > cleanEx() > nameEx("oats") > ### * oats > > flush(stderr()); flush(stdout()) > > ### Name: oats > ### Title: Data from an Oats Field Trial > ### Aliases: oats > ### Keywords: datasets > > ### ** Examples > > oats$Nf <- ordered(oats$N, levels = sort(levels(oats$N))) > oats.aov <- aov(Y ~ Nf*V + Error(B/V), data = oats, qr = TRUE) > summary(oats.aov) Error: B Df Sum Sq Mean Sq F value Pr(>F) Residuals 5 15875 3175 Error: B:V Df Sum Sq Mean Sq F value Pr(>F) V 2 1786 893.2 1.485 0.272 Residuals 10 6013 601.3 Error: Within Df Sum Sq Mean Sq F value Pr(>F) Nf 3 20020 6673 37.686 2.46e-12 *** Nf:V 6 322 54 0.303 0.932 Residuals 45 7969 177 --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > summary(oats.aov, split = list(Nf=list(L=1, Dev=2:3))) Error: B Df Sum Sq Mean Sq F value Pr(>F) Residuals 5 15875 3175 Error: B:V Df Sum Sq Mean Sq F value Pr(>F) V 2 1786 893.2 1.485 0.272 Residuals 10 6013 601.3 Error: Within Df Sum Sq Mean Sq F value Pr(>F) Nf 3 20020 6673 37.686 2.46e-12 *** Nf: L 1 19536 19536 110.323 1.09e-13 *** Nf: Dev 2 484 242 1.367 0.265 Nf:V 6 322 54 0.303 0.932 Nf:V: L 2 168 84 0.475 0.625 Nf:V: Dev 4 153 38 0.217 0.928 Residuals 45 7969 177 --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > par(mfrow = c(1,2), pty = "s") > plot(fitted(oats.aov[[4]]), studres(oats.aov[[4]])) > abline(h = 0, lty = 2) > oats.pr <- proj(oats.aov) > qqnorm(oats.pr[[4]][,"Residuals"], ylab = "Stratum 4 residuals") > qqline(oats.pr[[4]][,"Residuals"]) > > par(mfrow = c(1,1), pty = "m") > oats.aov2 <- aov(Y ~ N + V + Error(B/V), data = oats, qr = TRUE) > model.tables(oats.aov2, type = "means", se = TRUE) Warning in model.tables.aovlist(oats.aov2, type = "means", se = TRUE) : SEs for type 'means' are not yet implemented Tables of means Grand mean 103.9722 N N 0.0cwt 0.2cwt 0.4cwt 0.6cwt 79.39 98.89 114.22 123.39 V V Golden.rain Marvellous Victory 104.50 109.79 97.63 > > > > graphics::par(get("par.postscript", pos = 'CheckExEnv')) > cleanEx() > nameEx("parcoord") > ### * parcoord > > flush(stderr()); flush(stdout()) > > ### Name: parcoord > ### Title: Parallel Coordinates Plot > ### Aliases: parcoord > ### Keywords: hplot > > ### ** Examples > > parcoord(state.x77[, c(7, 4, 6, 2, 5, 3)]) > > ir <- rbind(iris3[,,1], iris3[,,2], iris3[,,3]) > parcoord(log(ir)[, c(3, 4, 2, 1)], col = 1 + (0:149)%/%50) > > > > cleanEx() > nameEx("petrol") > ### * petrol > > flush(stderr()); flush(stdout()) > > ### Name: petrol > ### Title: N. L. 
Prater's Petrol Refinery Data > ### Aliases: petrol > ### Keywords: datasets > > ### ** Examples > > library(nlme) > Petrol <- petrol > Petrol[, 2:5] <- scale(as.matrix(Petrol[, 2:5]), scale = FALSE) > pet3.lme <- lme(Y ~ SG + VP + V10 + EP, + random = ~ 1 | No, data = Petrol) > pet3.lme <- update(pet3.lme, method = "ML") > pet4.lme <- update(pet3.lme, fixed = Y ~ V10 + EP) > anova(pet4.lme, pet3.lme) Model df AIC BIC logLik Test L.Ratio p-value pet4.lme 1 5 149.6119 156.9406 -69.80594 pet3.lme 2 7 149.3833 159.6435 -67.69166 1 vs 2 4.22855 0.1207 > > > > cleanEx() detaching ‘package:nlme’ > nameEx("plot.mca") > ### * plot.mca > > flush(stderr()); flush(stdout()) > > ### Name: plot.mca > ### Title: Plot Method for Objects of Class 'mca' > ### Aliases: plot.mca > ### Keywords: hplot multivariate > > ### ** Examples > > plot(mca(farms, abbrev = TRUE)) > > > > cleanEx() > nameEx("plot.profile") > ### * plot.profile > > flush(stderr()); flush(stdout()) > > ### Name: plot.profile > ### Title: Plotting Functions for 'profile' Objects > ### Aliases: plot.profile pairs.profile > ### Keywords: models hplot > > ### ** Examples > > ## see ?profile.glm for an example using glm fits. 
> > ## a version of example(profile.nls) from R >= 2.8.0 > fm1 <- nls(demand ~ SSasympOrig(Time, A, lrc), data = BOD) > pr1 <- profile(fm1, alpha = 0.1) > MASS:::plot.profile(pr1) > pairs(pr1) # a little odd since the parameters are highly correlated > > ## an example from ?nls > x <- -(1:100)/10 > y <- 100 + 10 * exp(x / 2) + rnorm(x)/10 > nlmod <- nls(y ~ Const + A * exp(B * x), start=list(Const=100, A=10, B=1)) > pairs(profile(nlmod)) > > > > cleanEx() > nameEx("polr") > ### * polr > > flush(stderr()); flush(stdout()) > > ### Name: polr > ### Title: Ordered Logistic or Probit Regression > ### Aliases: polr > ### Keywords: models > > ### ** Examples > > options(contrasts = c("contr.treatment", "contr.poly")) > house.plr <- polr(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) > house.plr Call: polr(formula = Sat ~ Infl + Type + Cont, data = housing, weights = Freq) Coefficients: InflMedium InflHigh TypeApartment TypeAtrium TypeTerrace 0.5663937 1.2888191 -0.5723501 -0.3661866 -1.0910149 ContHigh 0.3602841 Intercepts: Low|Medium Medium|High -0.4961353 0.6907083 Residual Deviance: 3479.149 AIC: 3495.149 > summary(house.plr, digits = 3) Re-fitting to get Hessian Call: polr(formula = Sat ~ Infl + Type + Cont, data = housing, weights = Freq) Coefficients: Value Std. Error t value InflMedium 0.566 0.1047 5.41 InflHigh 1.289 0.1272 10.14 TypeApartment -0.572 0.1192 -4.80 TypeAtrium -0.366 0.1552 -2.36 TypeTerrace -1.091 0.1515 -7.20 ContHigh 0.360 0.0955 3.77 Intercepts: Value Std. Error t value Low|Medium -0.496 0.125 -3.974 Medium|High 0.691 0.125 5.505 Residual Deviance: 3479.149 AIC: 3495.149 > ## slightly worse fit from > summary(update(house.plr, method = "probit", Hess = TRUE), digits = 3) Call: polr(formula = Sat ~ Infl + Type + Cont, data = housing, weights = Freq, Hess = TRUE, method = "probit") Coefficients: Value Std. 
Error t value InflMedium 0.346 0.0641 5.40 InflHigh 0.783 0.0764 10.24 TypeApartment -0.348 0.0723 -4.81 TypeAtrium -0.218 0.0948 -2.30 TypeTerrace -0.664 0.0918 -7.24 ContHigh 0.222 0.0581 3.83 Intercepts: Value Std. Error t value Low|Medium -0.300 0.076 -3.937 Medium|High 0.427 0.076 5.585 Residual Deviance: 3479.689 AIC: 3495.689 > ## although it is not really appropriate, can fit > summary(update(house.plr, method = "loglog", Hess = TRUE), digits = 3) Call: polr(formula = Sat ~ Infl + Type + Cont, data = housing, weights = Freq, Hess = TRUE, method = "loglog") Coefficients: Value Std. Error t value InflMedium 0.367 0.0727 5.05 InflHigh 0.790 0.0806 9.81 TypeApartment -0.349 0.0757 -4.61 TypeAtrium -0.196 0.0988 -1.98 TypeTerrace -0.698 0.1043 -6.69 ContHigh 0.268 0.0636 4.21 Intercepts: Value Std. Error t value Low|Medium 0.086 0.083 1.038 Medium|High 0.892 0.087 10.223 Residual Deviance: 3491.41 AIC: 3507.41 > summary(update(house.plr, method = "cloglog", Hess = TRUE), digits = 3) Call: polr(formula = Sat ~ Infl + Type + Cont, data = housing, weights = Freq, Hess = TRUE, method = "cloglog") Coefficients: Value Std. Error t value InflMedium 0.382 0.0703 5.44 InflHigh 0.915 0.0926 9.89 TypeApartment -0.407 0.0861 -4.73 TypeAtrium -0.281 0.1111 -2.52 TypeTerrace -0.742 0.1013 -7.33 ContHigh 0.209 0.0651 3.21 Intercepts: Value Std. 
Error t value Low|Medium -0.796 0.090 -8.881 Medium|High 0.055 0.086 0.647 Residual Deviance: 3484.053 AIC: 3500.053 > > predict(house.plr, housing, type = "p") Low Medium High 1 0.3784493 0.2876752 0.3338755 2 0.3784493 0.2876752 0.3338755 3 0.3784493 0.2876752 0.3338755 4 0.2568264 0.2742122 0.4689613 5 0.2568264 0.2742122 0.4689613 6 0.2568264 0.2742122 0.4689613 7 0.1436924 0.2110836 0.6452240 8 0.1436924 0.2110836 0.6452240 9 0.1436924 0.2110836 0.6452240 10 0.5190445 0.2605077 0.2204478 11 0.5190445 0.2605077 0.2204478 12 0.5190445 0.2605077 0.2204478 13 0.3798514 0.2875965 0.3325521 14 0.3798514 0.2875965 0.3325521 15 0.3798514 0.2875965 0.3325521 16 0.2292406 0.2643196 0.5064398 17 0.2292406 0.2643196 0.5064398 18 0.2292406 0.2643196 0.5064398 19 0.4675584 0.2745383 0.2579033 20 0.4675584 0.2745383 0.2579033 21 0.4675584 0.2745383 0.2579033 22 0.3326236 0.2876008 0.3797755 23 0.3326236 0.2876008 0.3797755 24 0.3326236 0.2876008 0.3797755 25 0.1948548 0.2474226 0.5577225 26 0.1948548 0.2474226 0.5577225 27 0.1948548 0.2474226 0.5577225 28 0.6444840 0.2114256 0.1440905 29 0.6444840 0.2114256 0.1440905 30 0.6444840 0.2114256 0.1440905 31 0.5071210 0.2641196 0.2287594 32 0.5071210 0.2641196 0.2287594 33 0.5071210 0.2641196 0.2287594 34 0.3331573 0.2876330 0.3792097 35 0.3331573 0.2876330 0.3792097 36 0.3331573 0.2876330 0.3792097 37 0.2980880 0.2837746 0.4181374 38 0.2980880 0.2837746 0.4181374 39 0.2980880 0.2837746 0.4181374 40 0.1942209 0.2470589 0.5587202 41 0.1942209 0.2470589 0.5587202 42 0.1942209 0.2470589 0.5587202 43 0.1047770 0.1724227 0.7228003 44 0.1047770 0.1724227 0.7228003 45 0.1047770 0.1724227 0.7228003 46 0.4294564 0.2820629 0.2884807 47 0.4294564 0.2820629 0.2884807 48 0.4294564 0.2820629 0.2884807 49 0.2993357 0.2839753 0.4166890 50 0.2993357 0.2839753 0.4166890 51 0.2993357 0.2839753 0.4166890 52 0.1718050 0.2328648 0.5953302 53 0.1718050 0.2328648 0.5953302 54 0.1718050 0.2328648 0.5953302 55 0.3798387 0.2875972 0.3325641 56 0.3798387 
0.2875972 0.3325641 57 0.3798387 0.2875972 0.3325641 58 0.2579546 0.2745537 0.4674917 59 0.2579546 0.2745537 0.4674917 60 0.2579546 0.2745537 0.4674917 61 0.1444202 0.2117081 0.6438717 62 0.1444202 0.2117081 0.6438717 63 0.1444202 0.2117081 0.6438717 64 0.5583813 0.2471826 0.1944361 65 0.5583813 0.2471826 0.1944361 66 0.5583813 0.2471826 0.1944361 67 0.4178031 0.2838213 0.2983756 68 0.4178031 0.2838213 0.2983756 69 0.4178031 0.2838213 0.2983756 70 0.2584149 0.2746916 0.4668935 71 0.2584149 0.2746916 0.4668935 72 0.2584149 0.2746916 0.4668935 > addterm(house.plr, ~.^2, test = "Chisq") Single term additions Model: Sat ~ Infl + Type + Cont Df AIC LRT Pr(Chi) 3495.1 Infl:Type 6 3484.6 22.5093 0.0009786 *** Infl:Cont 2 3498.9 0.2090 0.9007957 Type:Cont 3 3492.5 8.6662 0.0340752 * --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > house.plr2 <- stepAIC(house.plr, ~.^2) Start: AIC=3495.15 Sat ~ Infl + Type + Cont Df AIC + Infl:Type 6 3484.6 + Type:Cont 3 3492.5 3495.1 + Infl:Cont 2 3498.9 - Cont 1 3507.5 - Type 3 3545.1 - Infl 2 3599.4 Step: AIC=3484.64 Sat ~ Infl + Type + Cont + Infl:Type Df AIC + Type:Cont 3 3482.7 3484.6 + Infl:Cont 2 3488.5 - Infl:Type 6 3495.1 - Cont 1 3497.8 Step: AIC=3482.69 Sat ~ Infl + Type + Cont + Infl:Type + Type:Cont Df AIC 3482.7 - Type:Cont 3 3484.6 + Infl:Cont 2 3486.6 - Infl:Type 6 3492.5 > house.plr2$anova Stepwise Model Path Analysis of Deviance Table Initial Model: Sat ~ Infl + Type + Cont Final Model: Sat ~ Infl + Type + Cont + Infl:Type + Type:Cont Step Df Deviance Resid. Df Resid. Dev AIC 1 1673 3479.149 3495.149 2 + Infl:Type 6 22.509347 1667 3456.640 3484.640 3 + Type:Cont 3 7.945029 1664 3448.695 3482.695 > anova(house.plr, house.plr2) Likelihood ratio tests of ordinal regression models Response: Sat Model Resid. df Resid. Dev Test Df 1 Infl + Type + Cont 1673 3479.149 2 Infl + Type + Cont + Infl:Type + Type:Cont 1664 3448.695 1 vs 2 9 LR stat. 
Pr(Chi) 1 2 30.45438 0.0003670555 > > house.plr <- update(house.plr, Hess=TRUE) > pr <- profile(house.plr) > confint(pr) 2.5 % 97.5 % InflMedium 0.3616415 0.77195375 InflHigh 1.0409701 1.53958138 TypeApartment -0.8069590 -0.33940432 TypeAtrium -0.6705862 -0.06204495 TypeTerrace -1.3893863 -0.79533958 ContHigh 0.1733589 0.54792854 > plot(pr) > pairs(pr) > > > > base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly")) > cleanEx() > nameEx("predict.glmmPQL") > ### * predict.glmmPQL > > flush(stderr()); flush(stdout()) > > ### Name: predict.glmmPQL > ### Title: Predict Method for glmmPQL Fits > ### Aliases: predict.glmmPQL > ### Keywords: models > > ### ** Examples > > fit <- glmmPQL(y ~ trt + I(week > 2), random = ~1 | ID, + family = binomial, data = bacteria) iteration 1 iteration 2 iteration 3 iteration 4 iteration 5 iteration 6 > predict(fit, bacteria, level = 0, type="response") [1] 0.9680779 0.9680779 0.8587270 0.8587270 0.9344832 0.9344832 0.7408574 [8] 0.7408574 0.8970307 0.8970307 0.6358511 0.6358511 0.6358511 0.9680779 [15] 0.9680779 0.8587270 0.8587270 0.8587270 0.9680779 0.9680779 0.8587270 [22] 0.8587270 0.8587270 0.8970307 0.8970307 0.6358511 0.6358511 0.9344832 [29] 0.9344832 0.7408574 0.7408574 0.7408574 0.9680779 0.9680779 0.8587270 [36] 0.8587270 0.8587270 0.9680779 0.9680779 0.8587270 0.8587270 0.8587270 [43] 0.9344832 0.7408574 0.9680779 0.9680779 0.8587270 0.8587270 0.8587270 [50] 0.8970307 0.8970307 0.6358511 0.6358511 0.6358511 0.9680779 0.9680779 [57] 0.8587270 0.8587270 0.8587270 0.9680779 0.9680779 0.8587270 0.8970307 [64] 0.8970307 0.6358511 0.6358511 0.6358511 0.9344832 0.9344832 0.7408574 [71] 0.7408574 0.7408574 0.9680779 0.9680779 0.8587270 0.8587270 0.8587270 [78] 0.8970307 0.8970307 0.6358511 0.6358511 0.6358511 0.9680779 0.9680779 [85] 0.8587270 0.8587270 0.8587270 0.9344832 0.9344832 0.7408574 0.7408574 [92] 0.9680779 0.9680779 0.8587270 0.8587270 0.8587270 0.9680779 0.9680779 [99] 0.8587270 0.8587270 
0.8587270 0.9680779 0.9680779 0.8587270 0.8587270 [106] 0.8587270 0.9344832 0.9344832 0.7408574 0.7408574 0.7408574 0.8970307 [113] 0.8970307 0.6358511 0.6358511 0.9680779 0.9680779 0.8587270 0.9680779 [120] 0.9680779 0.8587270 0.8587270 0.8970307 0.8970307 0.6358511 0.6358511 [127] 0.6358511 0.9344832 0.7408574 0.7408574 0.7408574 0.9680779 0.8587270 [134] 0.8587270 0.8587270 0.8970307 0.8970307 0.6358511 0.6358511 0.6358511 [141] 0.9680779 0.9680779 0.8587270 0.8587270 0.8587270 0.9344832 0.7408574 [148] 0.8970307 0.8970307 0.6358511 0.6358511 0.9680779 0.9680779 0.8587270 [155] 0.8970307 0.8970307 0.6358511 0.9680779 0.9680779 0.8587270 0.8587270 [162] 0.8587270 0.9344832 0.9344832 0.7408574 0.7408574 0.7408574 0.9680779 [169] 0.9680779 0.8587270 0.8587270 0.8587270 0.9344832 0.7408574 0.8970307 [176] 0.8970307 0.6358511 0.6358511 0.6358511 0.9344832 0.9344832 0.7408574 [183] 0.7408574 0.9680779 0.9680779 0.8587270 0.8587270 0.8587270 0.8970307 [190] 0.8970307 0.6358511 0.6358511 0.6358511 0.9344832 0.9344832 0.7408574 [197] 0.7408574 0.7408574 0.8970307 0.6358511 0.6358511 0.9344832 0.9344832 [204] 0.7408574 0.7408574 0.7408574 0.8970307 0.8970307 0.6358511 0.6358511 [211] 0.9344832 0.9344832 0.7408574 0.7408574 0.7408574 0.9344832 0.9344832 [218] 0.7408574 0.7408574 0.7408574 attr(,"label") [1] "Predicted values" > predict(fit, bacteria, level = 1, type="response") X01 X01 X01 X01 X02 X02 X02 X02 0.9828449 0.9828449 0.9198935 0.9198935 0.9050782 0.9050782 0.6564944 0.6564944 X03 X03 X03 X03 X03 X04 X04 X04 0.9724022 0.9724022 0.8759665 0.8759665 0.8759665 0.9851548 0.9851548 0.9300763 X04 X04 X05 X05 X05 X05 X05 X06 0.9300763 0.9300763 0.9851548 0.9851548 0.9300763 0.9300763 0.9300763 0.9662755 X06 X06 X06 X07 X07 X07 X07 X07 0.9662755 0.8516962 0.8516962 0.7291679 0.7291679 0.3504978 0.3504978 0.3504978 X08 X08 X08 X08 X08 X09 X09 X09 0.9426815 0.9426815 0.7672499 0.7672499 0.7672499 0.9851548 0.9851548 0.9300763 X09 X09 X10 X10 X11 X11 X11 X11 0.9300763 
0.9300763 0.9640326 0.8430706 0.9851548 0.9851548 0.9300763 0.9300763 X11 X12 X12 X12 X12 X12 X13 X13 0.9300763 0.8334870 0.8334870 0.5008219 0.5008219 0.5008219 0.9851548 0.9851548 X13 X13 X13 X14 X14 X14 X15 X15 0.9300763 0.9300763 0.9300763 0.8907227 0.8907227 0.6203155 0.9724022 0.9724022 X15 X15 X15 X16 X16 X16 X16 X16 0.8759665 0.8759665 0.8759665 0.9287777 0.9287777 0.7232833 0.7232833 0.7232833 X17 X17 X17 X17 X17 X18 X18 X18 0.9426815 0.9426815 0.7672499 0.7672499 0.7672499 0.7070916 0.7070916 0.3260827 X18 X18 X19 X19 X19 X19 X19 X20 0.3260827 0.3260827 0.8702991 0.8702991 0.5735499 0.5735499 0.5735499 0.9736293 X20 X20 X20 X21 X21 X21 X21 X21 0.9736293 0.8809564 0.8809564 0.9851548 0.9851548 0.9300763 0.9300763 0.9300763 Y01 Y01 Y01 Y01 Y01 Y02 Y02 Y02 0.9851548 0.9851548 0.9300763 0.9300763 0.9300763 0.7607971 0.7607971 0.3893126 Y02 Y02 Y03 Y03 Y03 Y03 Y03 Y04 0.3893126 0.3893126 0.8487181 0.8487181 0.5292976 0.5292976 0.5292976 0.5734482 Y04 Y04 Y04 Y05 Y05 Y05 Y06 Y06 0.5734482 0.2122655 0.2122655 0.7144523 0.7144523 0.3339997 0.9828449 0.9828449 Y06 Y06 Y07 Y07 Y07 Y07 Y07 Y08 0.9198935 0.9198935 0.8334870 0.8334870 0.5008219 0.5008219 0.5008219 0.9238389 Y08 Y08 Y08 Y09 Y09 Y09 Y09 Y10 0.7085660 0.7085660 0.7085660 0.9847299 0.9281899 0.9281899 0.9281899 0.9188296 Y10 Y10 Y10 Y10 Y11 Y11 Y11 Y11 0.9188296 0.6940862 0.6940862 0.6940862 0.9851548 0.9851548 0.9300763 0.9300763 Y11 Y12 Y12 Y13 Y13 Y13 Y13 Y14 0.9300763 0.9640326 0.8430706 0.5734482 0.5734482 0.2122655 0.2122655 0.9793383 Y14 Y14 Z01 Z01 Z01 Z02 Z02 Z02 0.9793383 0.9047659 0.9556329 0.9556329 0.8119328 0.9851548 0.9851548 0.9300763 Z02 Z02 Z03 Z03 Z03 Z03 Z03 Z05 0.9300763 0.9300763 0.9779690 0.9779690 0.8989642 0.8989642 0.8989642 0.8702991 Z05 Z05 Z05 Z05 Z06 Z06 Z07 Z07 0.8702991 0.5735499 0.5735499 0.5735499 0.8306525 0.4957505 0.8334870 0.8334870 Z07 Z07 Z07 Z09 Z09 Z09 Z09 Z10 0.5008219 0.5008219 0.5008219 0.9736293 0.9736293 0.8809564 0.8809564 0.9851548 Z10 Z10 Z10 Z10 Z11 Z11 
Z11 Z11 0.9851548 0.9300763 0.9300763 0.9300763 0.9724022 0.9724022 0.8759665 0.8759665 Z11 Z14 Z14 Z14 Z14 Z14 Z15 Z15 0.8759665 0.9287777 0.9287777 0.7232833 0.7232833 0.7232833 0.9643851 0.8444172 Z15 Z19 Z19 Z19 Z19 Z19 Z20 Z20 0.8444172 0.9779690 0.9779690 0.8989642 0.8989642 0.8989642 0.7620490 0.7620490 Z20 Z20 Z24 Z24 Z24 Z24 Z24 Z26 0.3909523 0.3909523 0.8487181 0.8487181 0.5292976 0.5292976 0.5292976 0.9287777 Z26 Z26 Z26 Z26 0.9287777 0.7232833 0.7232833 0.7232833 attr(,"label") [1] "Predicted values" > > > > cleanEx() > nameEx("predict.lda") > ### * predict.lda > > flush(stderr()); flush(stdout()) > > ### Name: predict.lda > ### Title: Classify Multivariate Observations by Linear Discrimination > ### Aliases: predict.lda > ### Keywords: multivariate > > ### ** Examples > > tr <- sample(1:50, 25) > train <- rbind(iris3[tr,,1], iris3[tr,,2], iris3[tr,,3]) > test <- rbind(iris3[-tr,,1], iris3[-tr,,2], iris3[-tr,,3]) > cl <- factor(c(rep("s",25), rep("c",25), rep("v",25))) > z <- lda(train, cl) > predict(z, test)$class [1] s s s s s s s s s s s s s s s s s s s s s s s s s c c c c c c c c c c v c c [39] c c c c c c c c c c c c v v v v v v v v v c v v v v v v v v v v v v v v v Levels: c s v > > > > cleanEx() > nameEx("predict.lqs") > ### * predict.lqs > > flush(stderr()); flush(stdout()) > > ### Name: predict.lqs > ### Title: Predict from an lqs Fit > ### Aliases: predict.lqs > ### Keywords: models > > ### ** Examples > > set.seed(123) > fm <- lqs(stack.loss ~ ., data = stackloss, method = "S", nsamp = "exact") > predict(fm, stackloss) 1 2 3 4 5 6 7 8 35.500000 35.579646 30.409292 19.477876 18.592920 19.035398 19.000000 19.000000 9 10 11 12 13 14 15 16 15.734513 14.079646 13.362832 13.000000 13.920354 13.486726 6.761062 7.000000 17 18 19 20 21 8.557522 8.000000 8.362832 13.154867 23.991150 > > > > cleanEx() > nameEx("predict.qda") > ### * predict.qda > > flush(stderr()); flush(stdout()) > > ### Name: predict.qda > ### Title: Classify from Quadratic 
Discriminant Analysis > ### Aliases: predict.qda > ### Keywords: multivariate > > ### ** Examples > > tr <- sample(1:50, 25) > train <- rbind(iris3[tr,,1], iris3[tr,,2], iris3[tr,,3]) > test <- rbind(iris3[-tr,,1], iris3[-tr,,2], iris3[-tr,,3]) > cl <- factor(c(rep("s",25), rep("c",25), rep("v",25))) > zq <- qda(train, cl) > predict(zq, test)$class [1] s s s s s s s s s s s s s s s s s s s s s s s s s c c c c c c c c c c v c c [39] c c c c c c c c c c c c v v v v v v v v v v v v v v v v v v v v v v v v v Levels: c s v > > > > cleanEx() > nameEx("profile.glm") > ### * profile.glm > > flush(stderr()); flush(stdout()) > > ### Name: profile.glm > ### Title: Method for Profiling glm Objects > ### Aliases: profile.glm > ### Keywords: regression models > > ### ** Examples > > options(contrasts = c("contr.treatment", "contr.poly")) > ldose <- rep(0:5, 2) > numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16) > sex <- factor(rep(c("M", "F"), c(6, 6))) > SF <- cbind(numdead, numalive = 20 - numdead) > budworm.lg <- glm(SF ~ sex*ldose, family = binomial) > pr1 <- profile(budworm.lg) > plot(pr1) > pairs(pr1) > > > > base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly")) > cleanEx() > nameEx("qda") > ### * qda > > flush(stderr()); flush(stdout()) > > ### Name: qda > ### Title: Quadratic Discriminant Analysis > ### Aliases: qda qda.data.frame qda.default qda.formula qda.matrix > ### model.frame.qda print.qda > ### Keywords: multivariate > > ### ** Examples > > tr <- sample(1:50, 25) > train <- rbind(iris3[tr,,1], iris3[tr,,2], iris3[tr,,3]) > test <- rbind(iris3[-tr,,1], iris3[-tr,,2], iris3[-tr,,3]) > cl <- factor(c(rep("s",25), rep("c",25), rep("v",25))) > z <- qda(train, cl) > predict(z,test)$class [1] s s s s s s s s s s s s s s s s s s s s s s s s s c c c c c c c c c c v c c [39] c c c c c c c c c c c c v v v v v v v v v v v v v v v v v v v v v v v v v Levels: c s v > > > > cleanEx() > nameEx("rational") > ### * rational > > flush(stderr()); 
flush(stdout()) > > ### Name: rational > ### Title: Rational Approximation > ### Aliases: rational .rat > ### Keywords: math > > ### ** Examples > > X <- matrix(runif(25), 5, 5) > zapsmall(solve(X, X/5)) # print near-zeroes as zero [,1] [,2] [,3] [,4] [,5] [1,] 0.2 0.0 0.0 0.0 0.0 [2,] 0.0 0.2 0.0 0.0 0.0 [3,] 0.0 0.0 0.2 0.0 0.0 [4,] 0.0 0.0 0.0 0.2 0.0 [5,] 0.0 0.0 0.0 0.0 0.2 > rational(solve(X, X/5)) [,1] [,2] [,3] [,4] [,5] [1,] 0.2 0.0 0.0 0.0 0.0 [2,] 0.0 0.2 0.0 0.0 0.0 [3,] 0.0 0.0 0.2 0.0 0.0 [4,] 0.0 0.0 0.0 0.2 0.0 [5,] 0.0 0.0 0.0 0.0 0.2 > > > > cleanEx() > nameEx("renumerate") > ### * renumerate > > flush(stderr()); flush(stdout()) > > ### Name: renumerate > ### Title: Convert a Formula Transformed by 'denumerate' > ### Aliases: renumerate renumerate.formula > ### Keywords: models > > ### ** Examples > > denumerate(~(1+2+3)^3 + a/b) ~(.v1 + .v2 + .v3)^3 + a/b > ## ~ (.v1 + .v2 + .v3)^3 + a/b > renumerate(.Last.value) ~(`1` + `2` + `3`)^3 + a/b > ## ~ (1 + 2 + 3)^3 + a/b > > > > cleanEx() > nameEx("rlm") > ### * rlm > > flush(stderr()); flush(stdout()) > > ### Name: rlm > ### Title: Robust Fitting of Linear Models > ### Aliases: rlm rlm.default rlm.formula print.rlm predict.rlm psi.bisquare > ### psi.hampel psi.huber > ### Keywords: models robust > > ### ** Examples > > summary(rlm(stack.loss ~ ., stackloss)) Call: rlm(formula = stack.loss ~ ., data = stackloss) Residuals: Min 1Q Median 3Q Max -8.91753 -1.73127 0.06187 1.54306 6.50163 Coefficients: Value Std. Error t value (Intercept) -41.0265 9.8073 -4.1832 Air.Flow 0.8294 0.1112 7.4597 Water.Temp 0.9261 0.3034 3.0524 Acid.Conc. -0.1278 0.1289 -0.9922 Residual standard error: 2.441 on 17 degrees of freedom > rlm(stack.loss ~ ., stackloss, psi = psi.hampel, init = "lts") Call: rlm(formula = stack.loss ~ ., data = stackloss, psi = psi.hampel, init = "lts") Converged in 9 iterations Coefficients: (Intercept) Air.Flow Water.Temp Acid.Conc. 
-40.4747826 0.7410853 1.2250730 -0.1455245 Degrees of freedom: 21 total; 17 residual Scale estimate: 3.09 > rlm(stack.loss ~ ., stackloss, psi = psi.bisquare) Call: rlm(formula = stack.loss ~ ., data = stackloss, psi = psi.bisquare) Converged in 11 iterations Coefficients: (Intercept) Air.Flow Water.Temp Acid.Conc. -42.2852537 0.9275471 0.6507322 -0.1123310 Degrees of freedom: 21 total; 17 residual Scale estimate: 2.28 > > > > cleanEx() > nameEx("rms.curv") > ### * rms.curv > > flush(stderr()); flush(stdout()) > > ### Name: rms.curv > ### Title: Relative Curvature Measures for Non-Linear Regression > ### Aliases: rms.curv print.rms.curv > ### Keywords: nonlinear > > ### ** Examples > > # The treated sample from the Puromycin data > mmcurve <- deriv3(~ Vm * conc/(K + conc), c("Vm", "K"), + function(Vm, K, conc) NULL) > Treated <- Puromycin[Puromycin$state == "treated", ] > (Purfit1 <- nls(rate ~ mmcurve(Vm, K, conc), data = Treated, + start = list(Vm=200, K=0.1))) Nonlinear regression model model: rate ~ mmcurve(Vm, K, conc) data: Treated Vm K 212.68363 0.06412 residual sum-of-squares: 1195 Number of iterations to convergence: 6 Achieved convergence tolerance: 6.096e-06 > rms.curv(Purfit1) Parameter effects: c^theta x sqrt(F) = 0.2121 Intrinsic: c^iota x sqrt(F) = 0.092 > ##Parameter effects: c^theta x sqrt(F) = 0.2121 > ## Intrinsic: c^iota x sqrt(F) = 0.092 > > > > cleanEx() > nameEx("rnegbin") > ### * rnegbin > > flush(stderr()); flush(stdout()) > > ### Name: rnegbin > ### Title: Simulate Negative Binomial Variates > ### Aliases: rnegbin > ### Keywords: distribution > > ### ** Examples > > # Negative Binomials with means fitted(fm) and theta = 4.5 > fm <- glm.nb(Days ~ ., data = quine) > dummy <- rnegbin(fitted(fm), theta = 4.5) > > > > cleanEx() > nameEx("sammon") > ### * sammon > > flush(stderr()); flush(stdout()) > > ### Name: sammon > ### Title: Sammon's Non-Linear Mapping > ### Aliases: sammon > ### Keywords: multivariate > > ### ** Examples > > swiss.x <- 
as.matrix(swiss[, -1]) > swiss.sam <- sammon(dist(swiss.x)) Initial stress : 0.00824 stress after 10 iters: 0.00439, magic = 0.338 stress after 20 iters: 0.00383, magic = 0.500 stress after 30 iters: 0.00383, magic = 0.500 > plot(swiss.sam$points, type = "n") > text(swiss.sam$points, labels = as.character(1:nrow(swiss.x))) > > > > cleanEx() > nameEx("stepAIC") > ### * stepAIC > > flush(stderr()); flush(stdout()) > > ### Name: stepAIC > ### Title: Choose a model by AIC in a Stepwise Algorithm > ### Aliases: stepAIC extractAIC.gls terms.gls extractAIC.lme terms.lme > ### Keywords: models > > ### ** Examples > > quine.hi <- aov(log(Days + 2.5) ~ .^4, quine) > quine.nxt <- update(quine.hi, . ~ . - Eth:Sex:Age:Lrn) > quine.stp <- stepAIC(quine.nxt, + scope = list(upper = ~Eth*Sex*Age*Lrn, lower = ~1), + trace = FALSE) > quine.stp$anova Stepwise Model Path Analysis of Deviance Table Initial Model: log(Days + 2.5) ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Age + Eth:Lrn + Sex:Age + Sex:Lrn + Age:Lrn + Eth:Sex:Age + Eth:Sex:Lrn + Eth:Age:Lrn + Sex:Age:Lrn Final Model: log(Days + 2.5) ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Age + Eth:Lrn + Sex:Age + Sex:Lrn + Age:Lrn + Eth:Sex:Lrn + Eth:Age:Lrn Step Df Deviance Resid. Df Resid. Dev AIC 1 120 64.09900 -68.18396 2 - Eth:Sex:Age 3 0.973869 123 65.07287 -71.98244 3 - Sex:Age:Lrn 2 1.526754 125 66.59962 -72.59652 > > cpus1 <- cpus > for(v in names(cpus)[2:7]) + cpus1[[v]] <- cut(cpus[[v]], unique(quantile(cpus[[v]])), + include.lowest = TRUE) > cpus0 <- cpus1[, 2:8] # excludes names, authors' predictions > cpus.samp <- sample(1:209, 100) > cpus.lm <- lm(log10(perf) ~ ., data = cpus1[cpus.samp,2:8]) > cpus.lm2 <- stepAIC(cpus.lm, trace = FALSE) > cpus.lm2$anova Stepwise Model Path Analysis of Deviance Table Initial Model: log10(perf) ~ syct + mmin + mmax + cach + chmin + chmax Final Model: log10(perf) ~ mmax + cach + chmax Step Df Deviance Resid. Df Resid. 
Dev AIC 1 82 3.362324 -303.2538 2 - syct 3 0.0445369 85 3.406861 -307.9379 3 - mmin 3 0.1947053 88 3.601566 -308.3801 4 - chmin 3 0.1924133 91 3.793980 -309.1755 > > example(birthwt) brthwt> bwt <- with(birthwt, { brthwt+ race <- factor(race, labels = c("white", "black", "other")) brthwt+ ptd <- factor(ptl > 0) brthwt+ ftv <- factor(ftv) brthwt+ levels(ftv)[-(1:2)] <- "2+" brthwt+ data.frame(low = factor(low), age, lwt, race, smoke = (smoke > 0), brthwt+ ptd, ht = (ht > 0), ui = (ui > 0), ftv) brthwt+ }) brthwt> options(contrasts = c("contr.treatment", "contr.poly")) brthwt> glm(low ~ ., binomial, bwt) Call: glm(formula = low ~ ., family = binomial, data = bwt) Coefficients: (Intercept) age lwt raceblack raceother smokeTRUE 0.82302 -0.03723 -0.01565 1.19241 0.74068 0.75553 ptdTRUE htTRUE uiTRUE ftv1 ftv2+ 1.34376 1.91317 0.68020 -0.43638 0.17901 Degrees of Freedom: 188 Total (i.e. Null); 178 Residual Null Deviance: 234.7 Residual Deviance: 195.5 AIC: 217.5 > birthwt.glm <- glm(low ~ ., family = binomial, data = bwt) > birthwt.step <- stepAIC(birthwt.glm, trace = FALSE) > birthwt.step$anova Stepwise Model Path Analysis of Deviance Table Initial Model: low ~ age + lwt + race + smoke + ptd + ht + ui + ftv Final Model: low ~ lwt + race + smoke + ptd + ht + ui Step Df Deviance Resid. Df Resid. Dev AIC 1 178 195.4755 217.4755 2 - ftv 2 1.358185 180 196.8337 214.8337 3 - age 1 1.017866 181 197.8516 213.8516 > birthwt.step2 <- stepAIC(birthwt.glm, ~ .^2 + I(scale(age)^2) + + I(scale(lwt)^2), trace = FALSE) > birthwt.step2$anova Stepwise Model Path Analysis of Deviance Table Initial Model: low ~ age + lwt + race + smoke + ptd + ht + ui + ftv Final Model: low ~ age + lwt + smoke + ptd + ht + ui + ftv + age:ftv + smoke:ui Step Df Deviance Resid. Df Resid. 
Dev AIC 1 178 195.4755 217.4755 2 + age:ftv 2 12.474896 176 183.0006 209.0006 3 + smoke:ui 1 3.056805 175 179.9438 207.9438 4 - race 2 3.129586 177 183.0734 207.0734 > > quine.nb <- glm.nb(Days ~ .^4, data = quine) > quine.nb2 <- stepAIC(quine.nb) Start: AIC=1095.32 Days ~ (Eth + Sex + Age + Lrn)^4 Df AIC - Eth:Sex:Age:Lrn 2 1092.7 1095.3 Step: AIC=1092.73 Days ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Age + Eth:Lrn + Sex:Age + Sex:Lrn + Age:Lrn + Eth:Sex:Age + Eth:Sex:Lrn + Eth:Age:Lrn + Sex:Age:Lrn Df AIC - Eth:Sex:Age 3 1089.4 1092.7 - Eth:Sex:Lrn 1 1093.3 - Eth:Age:Lrn 2 1094.7 - Sex:Age:Lrn 2 1095.0 Step: AIC=1089.41 Days ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Age + Eth:Lrn + Sex:Age + Sex:Lrn + Age:Lrn + Eth:Sex:Lrn + Eth:Age:Lrn + Sex:Age:Lrn Df AIC 1089.4 - Sex:Age:Lrn 2 1091.1 - Eth:Age:Lrn 2 1091.2 - Eth:Sex:Lrn 1 1092.5 > quine.nb2$anova Stepwise Model Path Analysis of Deviance Table Initial Model: Days ~ (Eth + Sex + Age + Lrn)^4 Final Model: Days ~ Eth + Sex + Age + Lrn + Eth:Sex + Eth:Age + Eth:Lrn + Sex:Age + Sex:Lrn + Age:Lrn + Eth:Sex:Lrn + Eth:Age:Lrn + Sex:Age:Lrn Step Df Deviance Resid. Df Resid. Dev AIC 1 118 167.4535 1095.324 2 - Eth:Sex:Age:Lrn 2 0.09746244 120 167.5509 1092.728 3 - Eth:Sex:Age 3 0.11060087 123 167.4403 1089.409 > > > > cleanEx() > nameEx("summary.negbin") > ### * summary.negbin > > flush(stderr()); flush(stdout()) > > ### Name: summary.negbin > ### Title: Summary Method Function for Objects of Class 'negbin' > ### Aliases: summary.negbin print.summary.negbin > ### Keywords: models > > ### ** Examples > > summary(glm.nb(Days ~ Eth*Age*Lrn*Sex, quine, link = log)) Call: glm.nb(formula = Days ~ Eth * Age * Lrn * Sex, data = quine, link = log, init.theta = 1.928360145) Deviance Residuals: Min 1Q Median 3Q Max -3.2377 -0.9079 -0.2019 0.5173 1.7043 Coefficients: (4 not defined because of singularities) Estimate Std. 
Error z value Pr(>|z|) (Intercept) 3.0564 0.3760 8.128 4.38e-16 *** EthN -0.1386 0.5334 -0.260 0.795023 AgeF1 -0.6227 0.5125 -1.215 0.224334 AgeF2 -2.3632 1.0770 -2.194 0.028221 * AgeF3 -0.3784 0.4546 -0.832 0.405215 LrnSL -1.9577 0.9967 -1.964 0.049493 * SexM -0.4914 0.5104 -0.963 0.335653 EthN:AgeF1 0.1029 0.7123 0.144 0.885175 EthN:AgeF2 -0.5546 1.6798 -0.330 0.741297 EthN:AgeF3 0.0633 0.6396 0.099 0.921159 EthN:LrnSL 2.2588 1.3019 1.735 0.082743 . AgeF1:LrnSL 2.6421 1.0821 2.442 0.014618 * AgeF2:LrnSL 4.8585 1.4423 3.369 0.000755 *** AgeF3:LrnSL NA NA NA NA EthN:SexM -0.7524 0.7220 -1.042 0.297400 AgeF1:SexM 0.4092 0.8299 0.493 0.621973 AgeF2:SexM 3.1098 1.1655 2.668 0.007624 ** AgeF3:SexM 1.1145 0.6365 1.751 0.079926 . LrnSL:SexM 1.5900 1.1499 1.383 0.166750 EthN:AgeF1:LrnSL -3.5493 1.4270 -2.487 0.012876 * EthN:AgeF2:LrnSL -3.3315 2.0919 -1.593 0.111256 EthN:AgeF3:LrnSL NA NA NA NA EthN:AgeF1:SexM -0.3105 1.2055 -0.258 0.796735 EthN:AgeF2:SexM 0.3469 1.7965 0.193 0.846875 EthN:AgeF3:SexM 0.8329 0.8970 0.929 0.353092 EthN:LrnSL:SexM -0.1639 1.5250 -0.107 0.914411 AgeF1:LrnSL:SexM -2.4285 1.4201 -1.710 0.087246 . AgeF2:LrnSL:SexM -4.1914 1.6201 -2.587 0.009679 ** AgeF3:LrnSL:SexM NA NA NA NA EthN:AgeF1:LrnSL:SexM 2.1711 1.9192 1.131 0.257963 EthN:AgeF2:LrnSL:SexM 2.1029 2.3444 0.897 0.369718 EthN:AgeF3:LrnSL:SexM NA NA NA NA --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for Negative Binomial(1.9284) family taken to be 1) Null deviance: 272.29 on 145 degrees of freedom Residual deviance: 167.45 on 118 degrees of freedom AIC: 1097.3 Number of Fisher Scoring iterations: 1 Theta: 1.928 Std. 
Err.: 0.269 2 x log-likelihood: -1039.324 > > > > cleanEx() > nameEx("summary.rlm") > ### * summary.rlm > > flush(stderr()); flush(stdout()) > > ### Name: summary.rlm > ### Title: Summary Method for Robust Linear Models > ### Aliases: summary.rlm print.summary.rlm > ### Keywords: robust > > ### ** Examples > > summary(rlm(calls ~ year, data = phones, maxit = 50)) Call: rlm(formula = calls ~ year, data = phones, maxit = 50) Residuals: Min 1Q Median 3Q Max -18.314 -5.953 -1.681 26.460 173.769 Coefficients: Value Std. Error t value (Intercept) -102.6222 26.6082 -3.8568 year 2.0414 0.4299 4.7480 Residual standard error: 9.032 on 22 degrees of freedom > > > > cleanEx() > nameEx("theta.md") > ### * theta.md > > flush(stderr()); flush(stdout()) > > ### Name: theta.md > ### Title: Estimate theta of the Negative Binomial > ### Aliases: theta.md theta.ml theta.mm > ### Keywords: models > > ### ** Examples > > quine.nb <- glm.nb(Days ~ .^2, data = quine) > theta.md(quine$Days, fitted(quine.nb), dfr = df.residual(quine.nb)) [1] 1.135441 > theta.ml(quine$Days, fitted(quine.nb)) [1] 1.603641 attr(,"SE") [1] 0.2138379 > theta.mm(quine$Days, fitted(quine.nb), dfr = df.residual(quine.nb)) [1] 1.562879 > > ## weighted example > yeast <- data.frame(cbind(numbers = 0:5, fr = c(213, 128, 37, 18, 3, 1))) > fit <- glm.nb(numbers ~ 1, weights = fr, data = yeast) > summary(fit) Call: glm.nb(formula = numbers ~ 1, data = yeast, weights = fr, init.theta = 3.586087428, link = log) Deviance Residuals: 1 2 3 4 5 6 -16.314 3.682 6.923 7.555 4.033 2.813 Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) -0.38199 0.06603 -5.785 7.25e-09 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for Negative Binomial(3.5861) family taken to be 1) Null deviance: 408.9 on 5 degrees of freedom Residual deviance: 408.9 on 5 degrees of freedom AIC: 897.06 Number of Fisher Scoring iterations: 1 Theta: 3.59 Std. 
Err.: 1.75 2 x log-likelihood: -893.063 > mu <- fitted(fit) > theta.md(yeast$numbers, mu, dfr = 399, weights = yeast$fr) [1] 3.027079 > theta.ml(yeast$numbers, mu, limit = 15, weights = yeast$fr) [1] 3.586087 attr(,"SE") [1] 1.749609 > theta.mm(yeast$numbers, mu, dfr = 399, weights = yeast$fr) [1] 3.549593 > > > > cleanEx() > nameEx("ucv") > ### * ucv > > flush(stderr()); flush(stdout()) > > ### Name: ucv > ### Title: Unbiased Cross-Validation for Bandwidth Selection > ### Aliases: ucv > ### Keywords: dplot > > ### ** Examples > > ucv(geyser$duration) Warning in ucv(geyser$duration) : minimum occurred at one end of the range [1] 0.1746726 > > > > cleanEx() > nameEx("waders") > ### * waders > > flush(stderr()); flush(stdout()) > > ### Name: waders > ### Title: Counts of Waders at 15 Sites in South Africa > ### Aliases: waders > ### Keywords: datasets > > ### ** Examples > > plot(corresp(waders, nf=2)) > > > > cleanEx() > nameEx("whiteside") > ### * whiteside > > flush(stderr()); flush(stdout()) > > ### Name: whiteside > ### Title: House Insulation: Whiteside's Data > ### Aliases: whiteside > ### Keywords: datasets > > ### ** Examples > > require(lattice) Loading required package: lattice > xyplot(Gas ~ Temp | Insul, whiteside, panel = + function(x, y, ...) { + panel.xyplot(x, y, ...) + panel.lmline(x, y, ...) + }, xlab = "Average external temperature (deg. C)", + ylab = "Gas consumption (1000 cubic feet)", aspect = "xy", + strip = function(...) strip.default(..., style = 1)) > > gasB <- lm(Gas ~ Temp, whiteside, subset = Insul=="Before") > gasA <- update(gasB, subset = Insul=="After") > summary(gasB) Call: lm(formula = Gas ~ Temp, data = whiteside, subset = Insul == "Before") Residuals: Min 1Q Median 3Q Max -0.62020 -0.19947 0.06068 0.16770 0.59778 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 6.85383 0.11842 57.88 <2e-16 *** Temp -0.39324 0.01959 -20.08 <2e-16 *** --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Residual standard error: 0.2813 on 24 degrees of freedom Multiple R-squared: 0.9438, Adjusted R-squared: 0.9415 F-statistic: 403.1 on 1 and 24 DF, p-value: < 2.2e-16 > summary(gasA) Call: lm(formula = Gas ~ Temp, data = whiteside, subset = Insul == "After") Residuals: Min 1Q Median 3Q Max -0.97802 -0.11082 0.02672 0.25294 0.63803 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 4.72385 0.12974 36.41 < 2e-16 *** Temp -0.27793 0.02518 -11.04 1.05e-11 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Residual standard error: 0.3548 on 28 degrees of freedom Multiple R-squared: 0.8131, Adjusted R-squared: 0.8064 F-statistic: 121.8 on 1 and 28 DF, p-value: 1.046e-11 > gasBA <- lm(Gas ~ Insul/Temp - 1, whiteside) > summary(gasBA) Call: lm(formula = Gas ~ Insul/Temp - 1, data = whiteside) Residuals: Min 1Q Median 3Q Max -0.97802 -0.18011 0.03757 0.20930 0.63803 Coefficients: Estimate Std. Error t value Pr(>|t|) InsulBefore 6.85383 0.13596 50.41 <2e-16 *** InsulAfter 4.72385 0.11810 40.00 <2e-16 *** InsulBefore:Temp -0.39324 0.02249 -17.49 <2e-16 *** InsulAfter:Temp -0.27793 0.02292 -12.12 <2e-16 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Residual standard error: 0.323 on 52 degrees of freedom Multiple R-squared: 0.9946, Adjusted R-squared: 0.9942 F-statistic: 2391 on 4 and 52 DF, p-value: < 2.2e-16 > > gasQ <- lm(Gas ~ Insul/(Temp + I(Temp^2)) - 1, whiteside) > coef(summary(gasQ)) Estimate Std. 
Error t value Pr(>|t|) InsulBefore 6.759215179 0.150786777 44.826312 4.854615e-42 InsulAfter 4.496373920 0.160667904 27.985514 3.302572e-32 InsulBefore:Temp -0.317658735 0.062965170 -5.044991 6.362323e-06 InsulAfter:Temp -0.137901603 0.073058019 -1.887563 6.489554e-02 InsulBefore:I(Temp^2) -0.008472572 0.006624737 -1.278930 2.068259e-01 InsulAfter:I(Temp^2) -0.014979455 0.007447107 -2.011446 4.968398e-02 > > gasPR <- lm(Gas ~ Insul + Temp, whiteside) > anova(gasPR, gasBA) Analysis of Variance Table Model 1: Gas ~ Insul + Temp Model 2: Gas ~ Insul/Temp - 1 Res.Df RSS Df Sum of Sq F Pr(>F) 1 53 6.7704 2 52 5.4252 1 1.3451 12.893 0.0007307 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > options(contrasts = c("contr.treatment", "contr.poly")) > gasBA1 <- lm(Gas ~ Insul*Temp, whiteside) > coef(summary(gasBA1)) Estimate Std. Error t value Pr(>|t|) (Intercept) 6.8538277 0.13596397 50.409146 7.997414e-46 InsulAfter -2.1299780 0.18009172 -11.827185 2.315921e-16 Temp -0.3932388 0.02248703 -17.487358 1.976009e-23 InsulAfter:Temp 0.1153039 0.03211212 3.590665 7.306852e-04 > > > > base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly")) > cleanEx() detaching ‘package:lattice’ > nameEx("width.SJ") > ### * width.SJ > > flush(stderr()); flush(stdout()) > > ### Name: width.SJ > ### Title: Bandwidth Selection by Pilot Estimation of Derivatives > ### Aliases: width.SJ > ### Keywords: dplot > > ### ** Examples > > width.SJ(geyser$duration, method = "dpi") [1] 0.5747852 > width.SJ(geyser$duration) [1] 0.360518 > > width.SJ(galaxies, method = "dpi") [1] 3256.151 > width.SJ(galaxies) [1] 2566.423 > > > > cleanEx() > nameEx("wtloss") > ### * wtloss > > flush(stderr()); flush(stdout()) > > ### Name: wtloss > ### Title: Weight Loss Data from an Obese Patient > ### Aliases: wtloss > ### Keywords: datasets > > ### ** Examples > > wtloss.fm <- nls(Weight ~ b0 + b1*2^(-Days/th), + data = wtloss, start = list(b0=90, b1=95, th=120)) > wtloss.fm 
Nonlinear regression model model: Weight ~ b0 + b1 * 2^(-Days/th) data: wtloss b0 b1 th 81.37 102.68 141.91 residual sum-of-squares: 39.24 Number of iterations to convergence: 3 Achieved convergence tolerance: 4.324e-06 > plot(wtloss) > with(wtloss, lines(Days, fitted(wtloss.fm))) > > > > ### *