RcmdrMisc/0000755000176200001440000000000012560163414012137 5ustar liggesusersRcmdrMisc/NAMESPACE0000744000176200001440000000214012560142400013344 0ustar liggesusers# last modified 2015-07-13 by J. Fox importFrom(Hmisc, rcorr, print.rcorr) importFrom(MASS, stepAIC) importFrom(abind, abind) importFrom(e1071, skewness, kurtosis) importFrom(car, showLabels, hccm, linearHypothesis) importFrom(sandwich, vcovHAC) importFrom(colorspace, rainbow_hcl) importFrom(readxl, read_excel, excel_sheets) importFrom(graphics, abline, arrows, axis, barplot, box, hist, legend, lines, matplot, mtext, par, plot, points, strheight, text) importFrom(grDevices, n2mfrow, palette) importFrom(stats, coef, complete.cases, cor, dist, formula, kmeans, model.matrix, na.omit, p.adjust, pf, pt, qt, quantile, sd, update) export(assignCluster, Barplot, bin.var, colPercents, Dotplot, excel_sheets, Hist, indexplot, KMeans, lineplot, mergeRows, numSummary, partial.cor, plotDistr, plotMeans, rcorr.adjust, readXL, reliability, rowPercents, stepwise, summarySandwich, totPercents) S3method(mergeRows, data.frame) S3method(print, numSummary) S3method(print, partial.cor) S3method(print, reliability) S3method(print, rcorr.adjust) S3method(summarySandwich, lm) RcmdrMisc/NEWS0000744000176200001440000000156212560142400012633 0ustar liggesusersChanges to Version 1.0-3 o Fixed bug in rcorr.adjust() that didn't properly convert .000 to <.001 for pairwise-complete correlations (reported by Bob Muenchen). o Added Barplot() and Dotplot(). o Added readXL(), export excel_sheets(), both from readxl package. o Conform to new CRAN package import requirements. Changes to Version 1.0-2 o Updated the following inadvertently reverted functions (and docs): partial.cor(), numSummary(), Hist(), rcorr.adjust() (following bug report by Mark Dunning). o Hist() reports a warning but doesn't fail for empty groups. Changes to Version 1.0-1 o Added "se(mean)" to numSummary(). Changes to Version 1.0-0 o First version of the package, with functions moved from the Rcmdr package to make them more conveniently available to other CRAN packages (at the suggestion of Liviu Andronic). 
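# --- Illustrative usage sketch (not part of the package sources) ---
# A minimal, hypothetical example of the functions introduced in the NEWS above:
# readXL()/excel_sheets(), Barplot(), and Dotplot(). The workbook "scores.xlsx"
# and the columns "group" and "score" are assumptions for illustration only.
library(RcmdrMisc)
excel_sheets("scores.xlsx")                 # list the worksheets in the (hypothetical) workbook
dat <- readXL("scores.xlsx", sheet=1, header=TRUE, stringsAsFactors=TRUE)
Barplot(dat$group, scale="percent")         # 'group' is assumed to be read as a factor
Dotplot(dat$score, by=dat$group, bin=TRUE)  # 'score' is assumed to be numeric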
RcmdrMisc/R/0000755000176200001440000000000012560142400012330 5ustar liggesusersRcmdrMisc/R/Dotplot.R0000744000176200001440000000562512560142400014111 0ustar liggesusersDotplot <- function(x, by, bin=FALSE, breaks, xlim, xlab=deparse(substitute(x))){ dotplot <- function(x, by, bin=FALSE, breaks, xlim, xlab=deparse(substitute(x)), main="", correction=1/3, correction.char=1, y.max){ if (bin) hist <- hist(x, breaks=breaks, plot=FALSE) if (missing(by)){ y <- if (bin) hist$counts else table(x) x <- if (bin) hist$mids else sort(unique(x)) plot(range(x), 0:1, type="n", xlab=xlab, ylab="", main=main, axes=FALSE, xlim=xlim) y.limits <- par("usr")[3:4] char.height <- correction.char*par("cxy")[2] axis(1, pos=0) if (missing(y.max)) y.max <- max(y) abline(h=0) cex <- min(((y.limits[2] - y.limits[1])/char.height)/ y.max, 2) for (i in 1:length(y)){ if (y[i] == 0) next points(rep(x[i], y[i]), cex*correction*char.height*seq(1, y[i]), pch=16, cex=cex, xpd=TRUE) } return(invisible(NULL)) } else{ if (missing(xlim)) xlim <- range(x) levels <- levels(by) n.groups <- length(levels) save.par <- par(mfrow=c(n.groups, 1)) on.exit(par(save.par)) if (bin){ for(level in levels){ # compute histograms by level to find maximum count max.count <- 0 hist.level <- hist(x[by == level], breaks=hist$breaks, plot=FALSE) max.count <- max(max.count, hist.level$counts) } for (level in levels){ dotplot(x[by == level], xlab=xlab, main=paste(label.by, "=", level), bin=TRUE, breaks=hist$breaks, xlim=xlim, correction=1/2, correction.char=0.5, y.max=max.count) } } else { y <- table(x, by) for (level in levels){ dotplot(x[by == level], xlab=xlab, main=paste(label.by, "=", level), xlim=xlim, correction=1/2, correction.char=0.5, y.max=max(y)) } } } } if (!is.numeric(x)) stop("x must be a numeric variable") if (!missing(by) && !is.factor(by)) stop("by must be a factor") force(xlab) if (missing(by)){ x <- na.omit(x) } else{ label.by <- deparse(substitute(by)) keep <- complete.cases(x, by) x <- x[keep] by <- by[keep] } if (missing(xlim)) xlim <- range(x) force(xlab) if (missing(breaks))breaks <- "Sturges" if (missing(by)) dotplot(x=x, bin=bin, breaks=breaks, xlim=xlim, xlab=xlab) else dotplot(x=x, by=by, bin=bin, breaks=breaks, xlim=xlim, xlab=xlab) }RcmdrMisc/R/readXL.R0000744000176200001440000000105412560142400013633 0ustar liggesusersreadXL <- function(file, rownames=FALSE, header=TRUE, na="", sheet=1, stringsAsFactors=default.stringsAsFactors()){ data <- readxl::read_excel(path=file, sheet=sheet, col_names=header, na=na) class(data) <- "data.frame" if (rownames){ rownames(data) <- data[, 1] data[[1]] <- NULL } colnames(data) <- make.names(colnames(data), unique=TRUE) if (stringsAsFactors){ char <- sapply(data, class) == "character" for (var in which(char)){ data[[var]] <- factor(data[[var]]) } } data }RcmdrMisc/R/Percents.R0000744000176200001440000000244712560142400014246 0ustar liggesusers# functions for computing percentage tables # last modified 2014-08-04 by J. 
Fox colPercents <- function(tab, digits=1){ dim <- length(dim(tab)) if (is.null(dimnames(tab))){ dims <- dim(tab) dimnames(tab) <- lapply(1:dim, function(i) 1:dims[i]) } sums <- apply(tab, 2:dim, sum) per <- apply(tab, 1, function(x) x/sums) dim(per) <- dim(tab)[c(2:dim,1)] per <- aperm(per, c(dim, 1:(dim-1))) dimnames(per) <- dimnames(tab) per <- round(100*per, digits) result <- abind(per, Total=apply(per, 2:dim, sum), Count=sums, along=1) names(dimnames(result)) <- names(dimnames(tab)) result } rowPercents <- function(tab, digits=1){ dim <- length(dim(tab)) if (dim == 2) return(t(colPercents(t(tab), digits=digits))) tab <- aperm(tab, c(2,1,3:dim)) aperm(colPercents(tab, digits=digits), c(2,1,3:dim)) } totPercents <- function(tab, digits=1){ dim <- length(dim(tab)) if (is.null(dimnames(tab))){ dims <- dim(tab) dimnames(tab) <- lapply(1:dim, function(i) 1:dims[i]) } tab <- 100*tab/sum(tab) tab <- cbind(tab, rowSums(tab)) tab <- rbind(tab, colSums(tab)) rownames(tab)[nrow(tab)] <- "Total" colnames(tab)[ncol(tab)] <- "Total" round(tab, digits=digits) }RcmdrMisc/R/numSummary.R0000744000176200001440000002220312560142400014630 0ustar liggesusers# various numeric summary statistics # last modified 2014-09-04 by J. Fox numSummary <- function(data, statistics=c("mean", "sd", "se(mean)", "IQR", "quantiles", "cv", "skewness", "kurtosis"), type=c("2", "1", "3"), quantiles=c(0, .25, .5, .75, 1), groups){ sd <- function(x, type, ...){ apply(as.matrix(x), 2, stats::sd, na.rm=TRUE) } IQR <- function(x, type, ...){ apply(as.matrix(x), 2, stats::IQR, na.rm=TRUE) } std.err.mean <- function(x, ...){ x <- as.matrix(x) sd <- sd(x) n <- colSums(!is.na(x)) sd/sqrt(n) } cv <- function(x, ...){ x <- as.matrix(x) mean <- colMeans(x, na.rm=TRUE) sd <- sd(x) if (any(x <= 0, na.rm=TRUE)) warning("not all values are positive") cv <- sd/mean cv[mean <= 0] <- NA cv } skewness <- function(x, type, ...){ if (is.vector(x)) return(e1071::skewness(x, type=type, na.rm=TRUE)) apply(x, 2, skewness, type=type) } kurtosis <- function(x, type, ...){ if (is.vector(x)) return(e1071::kurtosis(x, type=type, na.rm=TRUE)) apply(x, 2, kurtosis, type=type) } data <- as.data.frame(data) if (!missing(groups)) { groups <- as.factor(groups) counts <- table(groups) if (any(counts == 0)){ levels <- levels(groups) warning("the following groups are empty: ", paste(levels[counts == 0], collapse=", ")) groups <- factor(groups, levels=levels[counts != 0]) } } variables <- names(data) if (missing(statistics)) statistics <- c("mean", "sd", "quantiles", "IQR") statistics <- match.arg(statistics, c("mean", "sd", "se(mean)", "IQR", "quantiles", "cv", "skewness", "kurtosis"), several.ok=TRUE) type <- match.arg(type) type <- as.numeric(type) ngroups <- if(missing(groups)) 1 else length(grps <- levels(groups)) quantiles <- if ("quantiles" %in% statistics) quantiles else NULL quants <- if (length(quantiles) > 1) paste(100*quantiles, "%", sep="") else NULL # quants <- paste(100*quantiles, "%", sep="") nquants <- length(quants) stats <- c(c("mean", "sd", "se(mean)", "IQR", "cv", "skewness", "kurtosis")[c("mean", "sd", "se(mean)", "IQR", "cv", "skewness", "kurtosis") %in% statistics], quants) nstats <- length(stats) nvars <- length(variables) result <- list() if ((ngroups == 1) && (nvars == 1) && (length(statistics) == 1)){ if (statistics == "quantiles") table <- quantile(data[,variables], probs=quantiles, na.rm=TRUE) else { stats <- statistics stats[stats == "se(mean)"] <- "std.err.mean" table <- do.call(stats, list(x=data[,variables], na.rm=TRUE, type=type)) 
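# single variable, single statistic, no groups: call the statistic function
# directly ("se(mean)" was mapped to std.err.mean above)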
names(table) <- statistics } NAs <- sum(is.na(data[,variables])) n <- nrow(data) - NAs result$type <- 1 } else if ((ngroups > 1) && (nvars == 1) && (length(statistics) == 1)){ if (statistics == "quantiles"){ table <- matrix(unlist(tapply(data[, variables], groups, quantile, probs=quantiles, na.rm=TRUE)), ngroups, nquants, byrow=TRUE) rownames(table) <- grps colnames(table) <- quants } else table <- tapply(data[,variables], groups, statistics, na.rm=TRUE, type=type) NAs <- tapply(data[, variables], groups, function(x) sum(is.na(x))) n <- table(groups) - NAs result$type <- 2 } else if ((ngroups == 1) ){ X <- as.matrix(data[, variables]) table <- matrix(0, nvars, nstats) rownames(table) <- if (length(variables) > 1) variables else "" colnames(table) <- stats if ("mean" %in% stats) table[,"mean"] <- colMeans(X, na.rm=TRUE) if ("sd" %in% stats) table[,"sd"] <- sd(X) if ("se(mean)" %in% stats) table[, "se(mean)"] <- std.err.mean(X) if ("IQR" %in% stats) table[, "IQR"] <- IQR(X) if ("cv" %in% stats) table[,"cv"] <- cv(X) if ("skewness" %in% statistics) table[, "skewness"] <- skewness(X, type=type) if ("kurtosis" %in% statistics) table[, "kurtosis"] <- kurtosis(X, type=type) if ("quantiles" %in% statistics){ table[,quants] <- t(apply(data[, variables, drop=FALSE], 2, quantile, probs=quantiles, na.rm=TRUE)) } NAs <- colSums(is.na(data[, variables, drop=FALSE])) n <- nrow(data) - NAs result$type <- 3 } else { table <- array(0, c(ngroups, nstats, nvars), dimnames=list(Group=grps, Statistic=stats, Variable=variables)) NAs <- matrix(0, nvars, ngroups) rownames(NAs) <- variables colnames(NAs) <- grps for (variable in variables){ if ("mean" %in% stats) table[, "mean", variable] <- tapply(data[, variable], groups, mean, na.rm=TRUE) if ("sd" %in% stats) table[, "sd", variable] <- tapply(data[, variable], groups, sd, na.rm=TRUE) if ("se(mean)" %in% stats) table[, "se(mean)", variable] <- tapply(data[, variable], groups, std.err.mean, na.rm=TRUE) if ("IQR" %in% stats) table[, "IQR", variable] <- tapply(data[, variable], groups, IQR, na.rm=TRUE) if ("cv" %in% stats) table[, "cv", variable] <- tapply(data[, variable], groups, cv) if ("skewness" %in% stats) table[, "skewness", variable] <- tapply(data[, variable], groups, skewness, type=type) if ("kurtosis" %in% stats) table[, "kurtosis", variable] <- tapply(data[, variable], groups, kurtosis, type=type) if ("quantiles" %in% statistics) { res <- matrix(unlist(tapply(data[, variable], groups, quantile, probs=quantiles, na.rm=TRUE)), ngroups, nquants, byrow=TRUE) table[, quants, variable] <- res } NAs[variable,] <- tapply(data[, variable], groups, function(x) sum(is.na(x))) } if (nstats == 1) table <- table[,1,] if (nvars == 1) table <- table[,,1] n <- table(groups) n <- matrix(n, nrow=nrow(NAs), ncol=ncol(NAs), byrow=TRUE) n <- n - NAs result$type <- 4 } result$table <- table result$statistics <- statistics result$n <- n if (any(NAs > 0)) result$NAs <- NAs class(result) <- "numSummary" result } print.numSummary <- function(x, ...){ NAs <- x$NAs table <- x$table n <- x$n statistics <- x$statistics switch(x$type, "1" = { if (!is.null(NAs)) { table <- c(table, n, NAs) names(table)[length(table) - 1:0] <- c("n", "NA") } print(table) }, "2" = { if (statistics == "quantiles") { table <- cbind(table, n) colnames(table)[ncol(table)] <- "n" if (!is.null(NAs)) { table <- cbind(table, NAs) colnames(table)[ncol(table)] <- "NA" } } else { table <- rbind(table, n) rownames(table)[c(1, nrow(table))] <- c(statistics, "n") if (!is.null(NAs)) { table <- rbind(table, NAs) 
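# the NA-count row is labelled below and the table transposed so that groups appear as rows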
rownames(table)[nrow(table)] <- "NA" } table <- t(table) } print(table) }, "3" = { table <- cbind(table, n) colnames(table)[ncol(table)] <- "n" if (!is.null(NAs)) { table <- cbind(table, NAs) colnames(table)[ncol(table)] <- "NA" } print(table) }, "4" = { if (length(dim(table)) == 2){ n <- t(n) nms <- colnames(n) colnames(n) <- paste(nms, ":n", sep="") table <- cbind(table, n) if (!is.null(NAs)) { NAs <- t(NAs) nms <- colnames(NAs) colnames(NAs) <- paste(nms, ":NA", sep="") table <- cbind(table, NAs) } print(table) } else { table <- abind(table, t(n), along=2) dimnames(table)[[2]][dim(table)[2]] <- "n" if (!is.null(NAs)) { table <- abind(table, t(NAs), along=2) dimnames(table)[[2]][dim(table)[2]] <- "NA" } nms <- dimnames(table)[[3]] for (name in nms){ cat("\nVariable:", name, "\n") print(table[,,name]) } } } ) invisible(x) } RcmdrMisc/R/bin.var.R0000744000176200001440000000135112560142400014013 0ustar liggesusers# bin a numeric variable # Author: Dan Putler (revision by J. Fox, 5 Dec 04 & 5 Mar 13) # last modified 2014-08-04 bin.var <- function (x, bins=4, method=c("intervals", "proportions", "natural"), labels=FALSE){ method <- match.arg(method) if(length(x) < bins) { stop("The number of bins exceeds the number of data values") } x <- if(method == "intervals") cut(x, bins, labels=labels) else if (method == "proportions") cut(x, quantile(x, probs=seq(0,1,1/bins), na.rm=TRUE), include.lowest = TRUE, labels=labels) else { xx <- na.omit(x) breaks <- c(-Inf, tapply(xx, KMeans(xx, bins)$cluster, max)) cut(x, breaks, labels=labels) } as.factor(x) } RcmdrMisc/R/plots.R0000744000176200001440000002076012560142400013622 0ustar liggesusers# various high-level plots # last modified 2014-09-04 by J. Fox Hist <- function(x, groups, scale=c("frequency", "percent", "density"), xlab=deparse(substitute(x)), ylab=scale, main="", breaks="Sturges", ...){ xlab # evaluate scale <- match.arg(scale) ylab if (!missing(groups)){ counts <- table(groups) if (any(counts == 0)){ levels <- levels(groups) warning("the following groups are empty: ", paste(levels[counts == 0], collapse=", ")) } levels <- levels(groups) hists <- lapply(levels, function(level) if (counts[level] != 0) hist(x[groups == level], plot=FALSE, breaks=breaks) else list(breaks=NA)) range.x <- range(unlist(lapply(hists, function(hist) hist$breaks)), na.rm=TRUE) n.breaks <- max(sapply(hists, function(hist) length(hist$breaks))) breaks. <- seq(range.x[1], range.x[2], length=n.breaks) hists <- lapply(levels, function(level) if (counts[level] != 0) hist(x[groups == level], plot=FALSE, breaks=breaks.) else list(counts=0, density=0)) ylim <- if (scale == "frequency"){ max(sapply(hists, function(hist) max(hist$counts))) } else if (scale == "density"){ max(sapply(hists, function(hist) max(hist$density))) } else { max.counts <- sapply(hists, function(hist) max(hist$counts)) tot.counts <- sapply(hists, function(hist) sum(hist$counts)) ylims <- tot.counts*(max(max.counts[tot.counts != 0]/tot.counts[tot.counts != 0])) names(ylims) <- levels ylims } save.par <- par(mfrow=n2mfrow(sum(counts != 0)), oma = c(0, 0, if (main != "") 1.5 else 0, 0)) on.exit(par(save.par)) for (level in levels){ if (counts[level] == 0) next if (scale != "percent") Hist(x[groups == level], scale=scale, xlab=xlab, ylab=ylab, main=paste(deparse(substitute(groups)), "=", level), breaks=breaks., ylim=c(0, ylim), ...) else Hist(x[groups == level], scale=scale, xlab=xlab, ylab=ylab, main=paste(deparse(substitute(groups)), "=", level), breaks=breaks., ylim=c(0, ylim[level]), ...) 
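# each non-empty group is drawn by a recursive call to Hist() on the common set of break points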
} if (main != "") mtext(side = 3, outer = TRUE, main, cex = 1.2) return(invisible(NULL)) } x <- na.omit(x) if (scale == "frequency") hist(x, xlab=xlab, ylab=ylab, main=main, breaks=breaks, ...) else if (scale == "density") hist(x, freq=FALSE, xlab=xlab, ylab=ylab, main=main, breaks=breaks, ...) else { n <- length(x) hist(x, axes=FALSE, xlab=xlab, ylab=ylab, main=main, breaks=breaks, ...) axis(1) max <- ceiling(10*par("usr")[4]/n) at <- if (max <= 3) (0:(2*max))/20 else (0:max)/10 axis(2, at=at*n, labels=at*100) } box() abline(h=0) invisible(NULL) } indexplot <- function(x, labels=seq_along(x), id.method="y", type="h", id.n=0, ylab, ...){ if (missing(ylab)) ylab <- deparse(substitute(x)) plot(x, type=type, ylab=ylab, xlab="Observation Index", ...) if (par("usr")[3] <= 0) abline(h=0, col='gray') ids <- showLabels(seq_along(x), x, labels=labels, id.method=id.method, id.n=id.n) if (is.null(ids)) return(invisible(NULL)) else return(ids) } lineplot <- function(x, ..., legend){ xlab <- deparse(substitute(x)) y <- cbind(...) m <- ncol(y) legend <- if (missing(legend)) m > 1 if (legend && m > 1) { mar <- par("mar") top <- 3.5 + m old.mar <- par(mar=c(mar[1:2], top, mar[4])) on.exit(par(old.mar)) } if (m > 1) matplot(x, y, type="b", lty=1, xlab=xlab, ylab="") else plot(x, y, type="b", pch=16, xlab=xlab, ylab=colnames(y)) if (legend && ncol(y) > 1){ xpd <- par(xpd=TRUE) on.exit(par(xpd), add=TRUE) ncols <- length(palette()) cols <- rep(1:ncols, 1 + m %/% ncols)[1:m] usr <- par("usr") legend(usr[1], usr[4] + 1.2*top*strheight("x"), legend=colnames(y), col=cols, lty=1, pch=as.character(1:m)) } return(invisible(NULL)) } plotDistr <- function(x, p, discrete=FALSE, cdf=FALSE, ...){ if (discrete){ if (cdf){ plot(x, p, ..., type="n") abline(h=0:1, col="gray") lines(x, p, ..., type="s") } else { plot(x, p, ..., type="h") points(x, p, pch=16) abline(h=0, col="gray") } } else{ if (cdf){ plot(x, p, ..., type="n") abline(h=0:1, col="gray") lines(x, p, ..., type="l") } else{ plot(x, p, ..., type="n") abline(h=0, col="gray") lines(x, p, ..., type="l") } } return(invisible(NULL)) } plotMeans <- function(response, factor1, factor2, error.bars = c("se", "sd", "conf.int", "none"), level=0.95, xlab=deparse(substitute(factor1)), ylab=paste("mean of", deparse(substitute(response))), legend.lab=deparse(substitute(factor2)), main="Plot of Means", pch=1:n.levs.2, lty=1:n.levs.2, col=palette(), ...){ if (!is.numeric(response)) stop("Argument response must be numeric.") xlab # force evaluation ylab legend.lab error.bars <- match.arg(error.bars) if (missing(factor2)){ if (!is.factor(factor1)) stop("Argument factor1 must be a factor.") valid <- complete.cases(factor1, response) factor1 <- factor1[valid] response <- response[valid] means <- tapply(response, factor1, mean) sds <- tapply(response, factor1, sd) ns <- tapply(response, factor1, length) if (error.bars == "se") sds <- sds/sqrt(ns) if (error.bars == "conf.int") sds <- qt((1 - level)/2, df=ns - 1, lower.tail=FALSE) * sds/sqrt(ns) sds[is.na(sds)] <- 0 yrange <- if (error.bars != "none") c( min(means - sds, na.rm=TRUE), max(means + sds, na.rm=TRUE)) else range(means, na.rm=TRUE) levs <- levels(factor1) n.levs <- length(levs) plot(c(1, n.levs), yrange, type="n", xlab=xlab, ylab=ylab, axes=FALSE, main=main, ...) 
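# add the group means as connected points, then the box, axes, and (unless suppressed) error bars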
    points(1:n.levs, means, type="b", pch=16, cex=2)
    box()
    axis(2)
    axis(1, at=1:n.levs, labels=levs)
    if (error.bars != "none") arrows(1:n.levs, means - sds, 1:n.levs, means + sds,
        angle=90, lty=2, code=3, length=0.125)
    } else {
        if (!(is.factor(factor1) | is.factor(factor2))) stop("Arguments factor1 and factor2 must be factors.")
        valid <- complete.cases(factor1, factor2, response)
        factor1 <- factor1[valid]
        factor2 <- factor2[valid]
        response <- response[valid]
        means <- tapply(response, list(factor1, factor2), mean)
        sds <- tapply(response, list(factor1, factor2), sd)
        ns <- tapply(response, list(factor1, factor2), length)
        if (error.bars == "se") sds <- sds/sqrt(ns)
        if (error.bars == "conf.int") sds <- qt((1 - level)/2, df=ns - 1, lower.tail=FALSE) * sds/sqrt(ns)
        sds[is.na(sds)] <- 0
        yrange <- if (error.bars != "none") c(min(means - sds, na.rm=TRUE), max(means + sds, na.rm=TRUE)) else range(means, na.rm=TRUE)
        levs.1 <- levels(factor1)
        levs.2 <- levels(factor2)
        n.levs.1 <- length(levs.1)
        n.levs.2 <- length(levs.2)
        if (length(pch) == 1) pch <- rep(pch, n.levs.2)
        if (length(col) == 1) col <- rep(col, n.levs.2)
        if (length(lty) == 1) lty <- rep(lty, n.levs.2)
        if (n.levs.2 > length(col)) stop(sprintf("Number of groups for factor2, %d, exceeds number of distinct colours, %d.", n.levs.2, length(col)))
        plot(c(1, n.levs.1 * 1.4), yrange, type="n", xlab=xlab, ylab=ylab, axes=FALSE, main=main, ...)
        box()
        axis(2)
        axis(1, at=1:n.levs.1, labels=levs.1)
        for (i in 1:n.levs.2){
            points(1:n.levs.1, means[, i], type="b", pch=pch[i], cex=2, col=col[i], lty=lty[i])
            if (error.bars != "none") arrows(1:n.levs.1, means[, i] - sds[, i], 1:n.levs.1, means[, i] + sds[, i],
                angle=90, code=3, col=col[i], lty=lty[i], length=0.125)
        }
        x.posn <- n.levs.1 * 1.1
        y.posn <- sum(c(0.1, 0.9) * par("usr")[c(3,4)])
        text(x.posn, y.posn, legend.lab, adj=c(0, -.5))
        legend(x.posn, y.posn, levs.2, pch=pch, col=col, lty=lty)
    }
    invisible(NULL)
}
RcmdrMisc/R/rcorr.adjust.R0000744000176200001440000000316412560142400015100 0ustar liggesusers
# the following function is adapted from a suggestion by Robert Muenchen
# uses rcorr in the Hmisc package
# last modified 2014-09-04 by J.
Fox rcorr.adjust <- function (x, type = c("pearson", "spearman"), use = c("complete.obs", "pairwise.complete.obs")) { opt <- options(scipen = 5) on.exit(options(opt)) type <- match.arg(type) use <- match.arg(use) x <- if (use == "complete.obs") as.matrix(na.omit(x)) else as.matrix(x) R <- rcorr(x, type = type) P <- P.unadj <- R$P p <- P[lower.tri(P)] adj.p <- p.adjust(p, method = "holm") P[lower.tri(P)] <- adj.p P[upper.tri(P)] <- 0 P <- P + t(P) P <- ifelse(P < 1e-04, 0, P) P <- format(round(P, 4)) diag(P) <- "" P[c(grep("0.0000", P), grep("^ 0$", P))] <- "<.0001" P[grep("0.000$", P)] <- "<.001" P.unadj <- ifelse(P.unadj < 1e-04, 0, P.unadj) P.unadj <- format(round(P.unadj, 4)) diag(P.unadj) <- "" P.unadj[c(grep("0.0000$", P.unadj), grep("^ 0$", P.unadj))] <- "<.0001" P.unadj[grep("0.000$", P.unadj)] <- "<.001" result <- list(R = R, P = P, P.unadj = P.unadj, type = type) class(result) <- "rcorr.adjust" result } print.rcorr.adjust <- function(x, ...){ cat("\n", if (x$type == "pearson") "Pearson" else "Spearman", "correlations:\n") print(round(x$R$r, 4)) cat("\n Number of observations: ") n <- x$R$n if (all(n[1] == n)) cat(n[1], "\n") else{ cat("\n") print(n) } cat("\n Pairwise two-sided p-values:\n") print(x$P.unadj, quote=FALSE) cat("\n Adjusted p-values (Holm's method)\n") print(x$P, quote=FALSE) } RcmdrMisc/R/cluster.R0000744000176200001440000000170212560142400014135 0ustar liggesusers# this code originally by Dan Putler, used with permission # last modified 2012-12-06 by J. Fox assignCluster <- function(clusterData, origData, clusterVec){ rowsDX <- row.names(clusterData) rowsX <- row.names(origData) clustAssign <- rep(NA, length(rowsX)) validData <- rowsX %in% rowsDX clustAssign[validData] <- clusterVec return(as.factor(clustAssign)) } KMeans <- function (x, centers, iter.max=10, num.seeds=10) { # fixed 15 Mar 05 by J. Fox if(mode(x)=="numeric") x<-data.frame(new.x=x) KM <- kmeans(x=x, centers=centers, iter.max=iter.max) for(i in 2:num.seeds) { newKM <- kmeans(x=x, centers=centers, iter.max=iter.max) if(sum(newKM$withinss) < sum(KM$withinss)) { KM <- newKM } } KM$tot.withinss <- sum(KM$withinss) xmean <- apply(x, 2, mean) centers <- rbind(KM$centers, xmean) bss1 <- as.matrix(dist(centers)^2) KM$betweenss <- sum(as.vector(bss1[nrow(bss1),])*c(KM$size,0)) return(KM) } RcmdrMisc/R/mergeRows.R0000744000176200001440000000112712560142400014427 0ustar liggesusers# simple row-wise merge of data frames # last modified 2014-08-04 by J. Fox mergeRows <- function(X, Y, common.only=FALSE, ...){ UseMethod("mergeRows") } mergeRows.data.frame <- function(X, Y, common.only=FALSE, ...){ cols1 <- names(X) cols2 <- names(Y) if (common.only){ common <- intersect(cols1, cols2) rbind(X[, common], Y[, common]) } else { all <- union(cols1, cols2) miss1 <- setdiff(all, cols1) miss2 <- setdiff(all, cols2) X[, miss1] <- NA Y[, miss2] <- NA rbind(X, Y) } }RcmdrMisc/R/partial.cor.R0000744000176200001440000000362612560142400014701 0ustar liggesusers# last modified 2014-09-04 by J. 
Fox partial.cor <- function(X, tests=FALSE, use=c("complete.obs", "pairwise.complete.obs")){ countValid <- function(X){ X <- !is.na(X) t(X) %*% X } use <- match.arg(use) if (use == "complete.obs"){ X <- na.omit(X) n <- nrow(X) } else n <- countValid(X) R <- cor(X, use=use) RI <- solve(R) D <- 1/sqrt(diag(RI)) R <- - RI * (D %o% D) diag(R) <- 0 rownames(R) <- colnames(R) <- colnames(X) result <- list(R=R, n=n, P=NULL, P.unadj=NULL) if (tests){ opt <- options(scipen=5) on.exit(options(opt)) df <- n - ncol(X) f <- (R^2)*df/(1 - R^2) P <- P.unadj <- pf(f, 1, df, lower.tail=FALSE) p <- P[lower.tri(P)] adj.p <- p.adjust(p, method="holm") P[lower.tri(P)] <- adj.p P[upper.tri(P)] <- 0 P <- P + t(P) P <- ifelse(P < 1e-04, 0, P) P <- format(round(P, 4)) diag(P) <- "" P[c(grep("0.0000", P), grep("^ 0$", P))] <- "<.0001" P.unadj <- ifelse(P.unadj < 1e-04, 0, P.unadj) P.unadj <- format(round(P.unadj, 4)) diag(P.unadj) <- "" P.unadj[c(grep("0.0000", P.unadj), grep("^ 0$", P.unadj))] <- "<.0001" result$P <- P result$P.unadj <- P.unadj } class(result) <- "partial.cor" result } print.partial.cor <- function(x, digits=max(3, getOption("digits") - 2), ...){ cat("\n Partial correlations:\n") print(round(x$R, digits, ...)) cat("\n Number of observations: ") n <- x$n if (all(n[1] == n)) cat(n[1], "\n") else{ cat("\n") print(n) } if (!is.null(x$P)){ cat("\n Pairwise two-sided p-values:\n") print(x$P.unadj, quote=FALSE) cat("\n Adjusted p-values (Holm's method)\n") print(x$P, quote=FALSE) } x }RcmdrMisc/R/summarySandwich.R0000744000176200001440000000142212560142400015631 0ustar liggesusers# last modified 2014-08-09 by J. Fox summarySandwich <- function(model, ...){ UseMethod("summarySandwich") } summarySandwich.lm <- function(model, type=c("hc3", "hc0", "hc1", "hc2", "hc4", "hac"), ...){ s <- summary(model) c <- coef(s) type <- match.arg(type) v <- if (type != "hac") hccm(model, type=type, ...) else vcovHAC(model, ...) c[, 2] <- sqrt(diag(v)) c[, 3] <- c[,1]/c[,2] c[, 4] <- 2*pt(abs(c[,3]), df=s$df[2], lower.tail=FALSE) colnames(c)[2] <- paste("Std.Err(", type, ")", sep="") s$coefficients <- c coefs <- names(coef(model)) coefs <- coefs[coefs != "(Intercept)"] h <- linearHypothesis(model, coefs, vcov.=v) s$fstatistic <- c(value=h$F[2], numdf=length(coefs), dendf=s$df[2]) s }RcmdrMisc/R/stepwise.R0000744000176200001440000000161212560142400014317 0ustar liggesusers# wrapper for stepAIC in the MASS package # last modified 2014-08-04 by J. Fox stepwise <- function(mod, direction=c("backward/forward", "forward/backward", "backward", "forward"), criterion=c("BIC", "AIC"), ...){ criterion <- match.arg(criterion) direction <- match.arg(direction) cat("\nDirection: ", direction) cat("\nCriterion: ", criterion, "\n\n") k <- if (criterion == "BIC") log(nrow(model.matrix(mod))) else 2 rhs <- paste(c("~", deparse(formula(mod)[[3]])), collapse="") rhs <- gsub(" ", "", rhs) if (direction == "forward" || direction == "forward/backward") mod <- update(mod, . ~ 1) if (direction == "backward/forward" || direction == "forward/backward") direction <- "both" lower <- ~ 1 upper <- eval(parse(text=rhs)) stepAIC(mod, scope=list(lower=lower, upper=upper), direction=direction, k=k, ...) 
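# stepAIC() from the MASS package performs the actual selection; k = log(n) above gives the BIC penalty, k = 2 the AIC penalty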
}RcmdrMisc/R/Barplot.R0000744000176200001440000000204112560142400014054 0ustar liggesusersBarplot <- function(x, by, scale=c("frequency", "percent"), style=c("divided", "parallel"), col=rainbow_hcl(length(levels(by))), xlab=deparse(substitute(x)), legend.title=deparse(substitute(by)), ylab=scale, legend.pos="topright"){ if (!is.factor(x)) stop("x must be a factor") if (!missing(by) && !is.factor(by)) stop("by must be a factor") scale <- match.arg(scale) style <- match.arg(style) if (missing(by)){ y <- table(x) if (scale == "percent") y <- 100*y/sum(y) barplot(y, xlab=xlab, ylab=ylab) } else{ col <- col[1:length(levels(by))] y <- table(by, x) if (scale == "percent") y <- 100*y/sum(y) barplot(y, xlab=xlab, ylab=ylab, legend.text=levels(by), col=col, args.legend=list(x=legend.pos, title=legend.title, inset=0.05), beside = style == "parallel") } return(invisible(NULL)) }RcmdrMisc/R/reliability.R0000744000176200001440000000324112560142400014765 0ustar liggesusers# last modified 2014-08-04 by J. Fox reliability <- function(S){ reliab <- function(S, R){ k <- dim(S)[1] ones <- rep(1, k) v <- as.vector(ones %*% S %*% ones) alpha <- (k/(k - 1)) * (1 - (1/v)*sum(diag(S))) rbar <- mean(R[lower.tri(R)]) std.alpha <- k*rbar/(1 + (k - 1)*rbar) c(alpha=alpha, std.alpha=std.alpha) } result <- list() if ((!is.numeric(S)) || !is.matrix(S) || (nrow(S) != ncol(S)) || any(abs(S - t(S)) > max(abs(S))*1e-10) || nrow(S) < 2) stop("argument must be a square, symmetric, numeric covariance matrix") k <- dim(S)[1] s <- sqrt(diag(S)) R <- S/(s %o% s) rel <- reliab(S, R) result$alpha <- rel[1] result$st.alpha <- rel[2] if (k < 3) { warning("there are fewer than 3 items in the scale") return(invisible(NULL)) } rel <- matrix(0, k, 3) for (i in 1:k) { rel[i, c(1,2)] <- reliab(S[-i, -i], R[-i, -i]) a <- rep(0, k) b <- rep(1, k) a[i] <- 1 b[i] <- 0 cov <- a %*% S %*% b var <- b %*% S %*% b rel[i, 3] <- cov/(sqrt(var * S[i,i])) } rownames(rel) <- rownames(S) colnames(rel) <- c("Alpha", "Std.Alpha", "r(item, total)") result$rel.matrix <- rel class(result) <- "reliability" result } print.reliability <- function(x, digits=4, ...){ cat(paste("Alpha reliability = ", round(x$alpha, digits), "\n")) cat(paste("Standardized alpha = ", round(x$st.alpha, digits), "\n")) cat("\nReliability deleting each item in turn:\n") print(round(x$rel.matrix, digits)) invisible(x) } RcmdrMisc/MD50000644000176200001440000000335012560163414012450 0ustar liggesuserscf248f3683758988792abe4ad15b1aab *DESCRIPTION dd153e44ba11791e6baef8231426aa4c *NAMESPACE 19b0729e12cfcfff161b791ba6691619 *NEWS fd81290fa02e3b1f0e81993907ee1c80 *R/Barplot.R 63eb64573825683cee20f5a7848eab39 *R/Dotplot.R a0408318e2aa9bb4ddf037be7b66255c *R/Percents.R bbfd6fa51d82e168a23c6307b1ea6c13 *R/bin.var.R d5be12a5d8c6ef09c2c9d050e4865df5 *R/cluster.R f4515f68d69bace7fc85bf83c9068458 *R/mergeRows.R bdbbbb73702aa64b6ab964cec1696e30 *R/numSummary.R c6fe3e403873c35c39a7f014c7dc0bd8 *R/partial.cor.R 090d8907e6468aec4b5ea8c405acf904 *R/plots.R 23cd6a3d3b767787f5e3ba8515bce73e *R/rcorr.adjust.R 62ed100fa9c747db37575ada8a7d0e9c *R/readXL.R 6324990e38f450311b9a0124f0ab4597 *R/reliability.R 8bed27ded7fbb5e2d93a0416384dbc7f *R/stepwise.R 6381f87fd82dfdfb2a549338752eee3b *R/summarySandwich.R 17a140da0c328c4b956af8910177658f *man/Barplot.Rd de563bce92b5336e0975d893d0beaa15 *man/Dotplot.Rd edb3109ea83354e7eff2145c9609eeeb *man/Hist.Rd 751abef96e6a32d368beed566f3b23b5 *man/KMeans.Rd 2feae98a74ae135b24bbdc984763fcba *man/assignCluster.Rd 80cad42c33aac725e6f2121ae30c4588 *man/bin.var.Rd 
459107b8e209f6f4a0bc5b47ac8360b6 *man/colPercents.Rd 5d5755cc679f47a1f796ec8eda170bb7 *man/indexplot.Rd 601ef286c343e596fe03cb5536e70739 *man/lineplot.Rd 5ff34b1ee73e5bf6efa9931842c6367e *man/mergeRows.Rd a89ca2623ab4ad585c41a5982349cba7 *man/numSummary.Rd ae327bd098374e1c98ee28c9aeb3392f *man/partial.cor.Rd ad38a28b4c3c71137d90dc88c3f01e83 *man/plotDistr.Rd cd0ad6c8555d153ecbff3979b4e5d153 *man/plotMeans.Rd d189401d75242865bff516beedfff5c6 *man/rcorr.adjust.Rd ff1b04eba45998f0a4bf6ebde239c3a4 *man/readXL.Rd a31489ecbd5b326b00a681d8bca952a6 *man/reliability.Rd 23590ec77218dca82b8c9470d89a188e *man/stepwise.Rd 97eae6433570114ec3b6e85b3f14d067 *man/summarySandwich.Rd RcmdrMisc/DESCRIPTION0000744000176200001440000000156312560163414013653 0ustar liggesusersPackage: RcmdrMisc Version: 1.0-3 Date: 2015-07-13 Title: R Commander Miscellaneous Functions Authors@R: c(person("John", "Fox", role = c("aut", "cre"), email = "jfox@mcmaster.ca"), person("Robert", "Muenchen", role = "ctb"), person("Dan", "Putler", role = "ctb") ) Depends: R (>= 3.0.0), utils, car, sandwich Imports: abind, colorspace, Hmisc, MASS, e1071, readxl, graphics, grDevices, stats ByteCompile: yes Description: Various statistical, graphics, and data-management functions used by the Rcmdr package in the R Commander GUI for R. License: GPL (>= 2) URL: http://www.r-project.org, http://socserv.socsci.mcmaster.ca/jfox/ NeedsCompilation: no Packaged: 2015-08-04 13:58:24 UTC; User Author: John Fox [aut, cre], Robert Muenchen [ctb], Dan Putler [ctb] Maintainer: John Fox Repository: CRAN Date/Publication: 2015-08-04 18:23:40 RcmdrMisc/man/0000755000176200001440000000000012560142400012702 5ustar liggesusersRcmdrMisc/man/stepwise.Rd0000744000176200001440000000424412560142400015041 0ustar liggesusers\name{stepwise} \Rdversion{1.1} \alias{stepwise} \title{ Stepwise Model Selection } \description{ This function is a front end to the \code{\link{stepAIC}} function in the \pkg{MASS} package. } \usage{ stepwise(mod, direction = c("backward/forward", "forward/backward", "backward", "forward"), criterion = c("BIC", "AIC"), ...) } \arguments{ \item{mod}{a model object of a class that can be handled by \code{stepAIC}.} \item{direction}{if \code{"backward/forward"} (the default), selection starts with the full model and eliminates predictors one at a time, at each step considering whether the criterion will be improved by adding back in a variable removed at a previous step; if \code{"forward/backwards"}, selection starts with a model including only a constant, and adds predictors one at a time, at each step considering whether the criterion will be improved by removing a previously added variable; \code{"backwards"} and \code{"forward"} are similar without the reconsideration at each step.} \item{criterion}{for selection. Either \code{"BIC"} (the default) or \code{"AIC"}. Note that \code{stepAIC} labels the criterion in the output as \code{"AIC"} regardless of which criterion is employed.} \item{...}{arguments to be passed to \code{stepAIC}.} } \value{ The model selected by \code{stepAIC}. } \references{ W. N. Venables and B. D. Ripley \emph{Modern Applied Statistics Statistics with S, Fourth Edition} Springer, 2002. 
} \author{John Fox \email{jfox@mcmaster.ca}} \seealso{\code{\link{stepAIC}}} \examples{ # adapted from ?stepAIC in MASS if (require(MASS)){ data(birthwt) bwt <- with(birthwt, { race <- factor(race, labels = c("white", "black", "other")) ptd <- factor(ptl > 0) ftv <- factor(ftv) levels(ftv)[-(1:2)] <- "2+" data.frame(low = factor(low), age, lwt, race, smoke = (smoke > 0), ptd, ht = (ht > 0), ui = (ui > 0), ftv) }) birthwt.glm <- glm(low ~ ., family = binomial, data = bwt) print(stepwise(birthwt.glm, trace = FALSE)) print(stepwise(birthwt.glm, direction="forward/backward")) } } \keyword{models} RcmdrMisc/man/rcorr.adjust.Rd0000744000176200001440000000320012560142400015605 0ustar liggesusers\name{rcorr.adjust} \Rdversion{1.1} \alias{rcorr.adjust} \alias{print.rcorr.adjust} \title{ Compute Pearson or Spearman Correlations with p-Values } \description{ This function uses the \code{\link[Hmisc]{rcorr}} function in the \pkg{Hmisc} package to compute matrices of Pearson or Spearman correlations along with the pairwise p-values among the correlations. The p-values are corrected for multiple inference using Holm's method (see \code{\link[stats]{p.adjust}}). Observations are filtered for missing data, and only complete observations are used. } \usage{ rcorr.adjust(x, type = c("pearson", "spearman"), use=c("complete.obs", "pairwise.complete.obs")) \method{print}{rcorr.adjust}(x, ...) } \arguments{ \item{x}{a numeric matrix or data frame, or an object of class \code{"rcorr.adjust"} to be printed.} \item{type}{\code{"pearson"} or \code{"spearman"}, depending upon the type of correlations desired; the default is \code{"pearson"}.} \item{use}{how to handle missing data: \code{"complete.obs"}, the default, use only complete cases; \code{"pairwise.complete.obs"}, use all cases with valid data for each pair.} \item{...}{not used.} } \value{ Returns an object of class \code{"rcorr.adjust"}, which is normally just printed. } \author{ John Fox, adapting code from Robert A. Muenchen. } \seealso{ \code{\link[Hmisc]{rcorr}}, \code{\link[stats]{p.adjust}}. } \examples{ if (require(car)){ data(Mroz) rcorr.adjust(Mroz[,c("k5", "k618", "age", "lwg", "inc")]) rcorr.adjust(Mroz[,c("k5", "k618", "age", "lwg", "inc")], type="spearman") } } \keyword{ htest } RcmdrMisc/man/bin.var.Rd0000744000176200001440000000232512560142400014533 0ustar liggesusers\name{bin.var} \alias{bin.var} \title{Bin a Numeric Varisible} \description{ Create a factor dissecting the range of a numeric variable into bins of equal width, (roughly) equal frequency, or at "natural" cut points. The \code{\link[base]{cut}} function is used to create the factor. } \usage{ bin.var(x, bins = 4, method = c("intervals", "proportions", "natural"), labels = FALSE) } \arguments{ \item{x}{numeric variable to be binned.} \item{bins}{number of bins.} \item{method}{one of \code{"intervals"} for equal-width bins; \code{"proportions"} for equal-count bins; \code{"natural"} for cut points between bins to be determined by a k-means clustering.} \item{labels}{if \code{FALSE}, numeric labels will be used for the factor levels; if \code{NULL}, the cut points are used to define labels; otherwise a character vector of level names.} } \value{ A factor. 
} \author{Dan Putler, slightly modified by John Fox \email{jfox@mcmaster.ca} with the original author's permission.} \seealso{\code{\link[base]{cut}}, \code{\link[stats]{kmeans}}.} \examples{ summary(bin.var(rnorm(100), method="prop", labels=letters[1:4])) } \keyword{manip} RcmdrMisc/man/KMeans.Rd0000744000176200001440000000265112560142400014354 0ustar liggesusers\name{KMeans} \alias{KMeans} \title{K-Means Clustering Using Multiple Random Seeds} \description{ Finds a number of k-means clusting solutions using R's \code{kmeans} function, and selects as the final solution the one that has the minimum total within-cluster sum of squared distances. } \usage{ KMeans(x, centers, iter.max=10, num.seeds=10) } \arguments{ \item{x}{A numeric matrix of data, or an object that can be coerced to such a matrix (such as a numeric vector or a dataframe with all numeric columns).} \item{centers}{The number of clusters in the solution.} \item{iter.max}{The maximum number of iterations allowed.} \item{num.seeds}{The number of different starting random seeds to use. Each random seed results in a different k-means solution.} } \value{ A list with components: \item{cluster}{A vector of integers indicating the cluster to which each point is allocated.} \item{centers}{A matrix of cluster centres (centroids).} \item{withinss}{The within-cluster sum of squares for each cluster.} \item{tot.withinss}{The within-cluster sum of squares summed across clusters.} \item{betweenss}{The between-cluster sum of squared distances.} \item{size}{The number of points in each cluster.} } \author{Dan Putler} \seealso{\code{\link[stats]{kmeans}}} \examples{ data(USArrests) KMeans(USArrests, centers=3, iter.max=5, num.seeds=5) } \keyword{misc} RcmdrMisc/man/plotDistr.Rd0000744000176200001440000000175312560142400015164 0ustar liggesusers\name{plotDistr} \alias{plotDistr} \title{ Plot a probability density, mass, or distribution function. } \description{ This function plots a probability density, mass, or distribution function, adapting the form of the plot as appropriate. } \usage{ plotDistr(x, p, discrete=FALSE, cdf=FALSE, ...) } \arguments{ \item{x}{horizontal coordinates} \item{p}{vertical coordinates} \item{discrete}{is the random variable discrete?} \item{cdf}{is this a cumulative distribution (as opposed to mass) function?} \item{\dots}{arguments to be passed to \code{plot}.} } \value{ Produces a plot; returns \code{NULL} invisibly. } \author{ John Fox \email{jfox@mcmaster.ca} } \examples{ x <- seq(-4, 4, length=100) plotDistr(x, dnorm(x), xlab="Z", ylab="p(z)", main="Standard Normal Density") x <- 0:10 plotDistr(x, pbinom(x, 10, 0.5), xlab="successes", discrete=TRUE, cdf=TRUE, main="Binomial Distribution Function, p=0.5, n=10") } \keyword{hplot} RcmdrMisc/man/Barplot.Rd0000744000176200001440000000330312560142400014574 0ustar liggesusers\name{Barplot} \alias{Barplot} \title{ Bar Plots } \description{ Create bar plots for one or two factors scaled by frequency or precentages. In the case of two factors, the bars can be divided (stacked) or plotted in parallel (side-by-side). 
This function is a front end to \code{\link{barplot}} in the \pkg{graphics} package.} \usage{ Barplot(x, by, scale = c("frequency", "percent"), style = c("divided", "parallel"), col = rainbow_hcl(length(levels(by))), xlab = deparse(substitute(x)), legend.title = deparse(substitute(by)), ylab = scale, legend.pos = "topright") } \arguments{ \item{x}{ a factor.} \item{by}{ optionally, a second factor.} \item{scale}{ either \code{"frequency"} (the default) or \code{"percent"}.} \item{style}{ for two-factor plots, either \code{"divided"} (the default) or \code{"parallel"}.} \item{col}{ colors for the \code{by} factor in two-factor plots; defaults to colors provided by \code{\link{rainbow_hcl}} in the \pkg{colorspace} package.} \item{xlab}{ an optional character string providing a label for the horizontal axis.} \item{legend.title}{ an optional character string providing a title for the legend.} \item{ylab}{ an optional character string providing a label for the vertical axis.} \item{legend.pos}{ position of the legend, in a form acceptable to the \code{\link{legend}} function.} } \value{ Returns \code{NULL} invisibly. } \author{ John Fox \email{jfox@mcmaster.ca} } \seealso{ \code{\link{barplot}}, \code{\link{legend}}, \code{\link{rainbow_hcl}} } \examples{ if (require(car)){ data(Mroz) with(Mroz, { Barplot(wc) Barplot(wc, by=hc) Barplot(wc, by=hc, style="parallel", scale="percent") }) } } \keyword{hplot} RcmdrMisc/man/summarySandwich.Rd0000744000176200001440000000305512560142400016353 0ustar liggesusers\name{summarySandwich} \alias{summarySandwich} \alias{summarySandwich.lm} \title{Linear Model Summary with Sandwich Standard Errors} \description{ \code{summarySandwich} creates a summary of a \code{"lm"} object similar to the standard one, with sandwich estimates of the coefficient standard errors in the place of the usual OLS standard errors, also modifying as a consequence the reported t-tests and p-values for the coefficients. Standard errors may be computed from a heteroscedasticity-consistent ("HC") covariance matrix for the coefficients (of several varieties), or from a heteroscedasticity-and-autocorrelation-consistent ("HAC") covariance matrix. } \usage{ summarySandwich(model, ...) \method{summarySandwich}{lm}(model, type=c("hc3", "hc0", "hc1", "hc2", "hc4", "hac"), ...) } \arguments{ \item{model}{a linear-model object.} \item{type}{type of sandwich standard errors to be computed; see \code{\link{hccm}} in the \pkg{car} package, and \code{\link{vcovHAC}} in the \pkg{sandwich} package, for details.} \item{...}{arguments to be passed to \code{hccm} or \code{vcovHAC}} } \value{ an object of class \code{"summary.lm"}, with sandwich standard errors substituted for the usual OLS standard errors; the omnibus F-test is similarly adjusted. } \author{John Fox \email{jfox@mcmaster.ca}} \seealso{\code{\link[car]{hccm}}, \code{\link[sandwich]{vcovHAC}}.} \examples{ mod <- lm(prestige ~ income + education + type, data=Prestige) summary(mod) summarySandwich(mod) } \keyword{misc} RcmdrMisc/man/readXL.Rd0000744000176200001440000000267112560142400014357 0ustar liggesusers\name{readXL} \alias{readXL} \alias{excel_sheets} \title{ Read an Excel File } \description{ \code{readXL} reads an Excel file, either of type \code{.xls} or \code{.xlsx} into an R data frame; it provides a front end to the \code{\link{read_excel}} function in the \pkg{readxl} package. 
\code{\link[readxl]{excel_sheets}} is re-exported from the \pkg{readxl} package and reports the names of spreadsheets in an Excel file.} \usage{ readXL(file, rownames = FALSE, header = TRUE, na = "", sheet = 1, stringsAsFactors = default.stringsAsFactors()) excel_sheets(path) } \arguments{ \item{file, path}{path to an Excel file.} \item{rownames}{if \code{TRUE} (the default is \code{FALSE}), the first column in the spreadsheet contains row names.} \item{header}{if \code{TRUE} (the default), the first row in the spreadsheet contains column (variable) names.} \item{na}{character string denoting missing data; the default is the empty string, \code{""}.} \item{sheet}{number of the spreadsheet in the file containing the data to be read; the default is \code{1}.} \item{stringsAsFactors}{if \code{TRUE} then columns containing character data are converted to factors; the default is taken from \code{default.stringsAsFactors()}.} } \value{ a data frame } \author{ John Fox \email{jfox@mcmaster.ca} } \seealso{ \code{\link{read_excel}}, \code{\link[readxl]{excel_sheets}} } \keyword{manip} RcmdrMisc/man/colPercents.Rd0000744000176200001440000000232512560142400015455 0ustar liggesusers\name{colPercents} \alias{colPercents} \alias{rowPercents} \alias{totPercents} \title{Row, Column, and Total Percentage Tables} \description{ Percentage a matrix or higher-dimensional array of frequency counts by rows, columns, or total frequency. } \usage{ colPercents(tab, digits=1) rowPercents(tab, digits=1) totPercents(tab, digits=1) } \arguments{ \item{tab}{a matrix or higher-dimensional array of frequency counts.} \item{digits}{number of places to the right of the decimal place for percentages.} } \value{ Returns an array of the same size and shape as \code{tab} percentaged by rows or columns, plus rows or columns of totals and counts, or by the table total. } \examples{ if (require(car)){ data(Mroz) # from car package cat("\n\n column percents:\n") print(colPercents(xtabs(~ lfp + wc, data=Mroz))) cat("\n\n row percents:\n") print(rowPercents(xtabs(~ hc + lfp, data=Mroz))) cat("\n\n total percents:\n") print(totPercents(xtabs(~ hc + wc, data=Mroz))) cat("\n\n three-way table, column percents:\n") print(colPercents(xtabs(~ lfp + wc + hc, data=Mroz))) } } \author{John Fox \email{jfox@mcmaster.ca}} \keyword{misc} RcmdrMisc/man/Dotplot.Rd0000744000176200001440000000304212560142400014616 0ustar liggesusers\name{Dotplot} \alias{Dotplot} \title{ Dot Plots } \description{ Dot plot of numeric variable, either using raw values or binned, optionally classified by a factor. Dot plots are useful for visualizing the distrbution of a numeric variable in a small data set.} \usage{ Dotplot(x, by, bin = FALSE, breaks, xlim, xlab = deparse(substitute(x))) } \arguments{ \item{x}{a numeric variable.} \item{by}{optinally a factor by which to classify \code{x}.} \item{bin}{if \code{TRUE} (the default is \code{FALSE}), the values of \code{x} are binned, as in a histogram, prior to plotting.} \item{breaks}{breaks for the bins, in a form acceptable to the \code{\link{hist}} function; the default is \code{"Sturges"}.} \item{xlim}{optional 2-element numeric vector giving limits of the horizontal axis.} \item{xlab}{optional character string to label horizontal axis.} } \details{ If the \code{by} argument is specified, then one dot plot is produced for each level of \code{by}; these are arranged vertically and all use the same scale for \code{x}. An attempt is made to adjust the size of the dots to the space available without making them too big. 
} \value{ Returns \code{NULL} invisibly. } \author{ John Fox \email{jfox@mcmaster.ca} } \seealso{ \code{\link{hist}} } \examples{ if (require(car)){ data(Duncan) with(Duncan, { Dotplot(education) Dotplot(education, bin=TRUE) Dotplot(education, by=type) Dotplot(education, by=type, bin=TRUE) }) } } \keyword{hplot} RcmdrMisc/man/assignCluster.Rd0000744000176200001440000000324312560142400016022 0ustar liggesusers\name{assignCluster} \alias{assignCluster} \title{Append a Cluster Membership Variable to a Dataframe} \description{ Correctly creates a cluster membership variable that can be attached to a dataframe when only a subset of the observations in that dataframe were used to create the clustering solution. NAs are assigned to the observations of the original dataframe not used in creating the clustering solution. } \usage{ assignCluster(clusterData, origData, clusterVec) } \arguments{ \item{clusterData}{The data matrix used in the clustering solution. The data matrix may have have only a subset of the observations contained in the original dataframe.} \item{origData}{The original dataframe from which the data used in the clustering solution were taken.} \item{clusterVec}{An integer variable containing the cluster membership assignments for the observations used in creating the clustering solution. This vector can be created using \code{cutree} for clustering solutions generated by \code{hclust} or the \code{cluster} component of a list object created by \code{kmeans} or \code{KMeans}.} } \value{ A factor (with integer labels) that indicate the cluster assignment for each observation, with an NA value given to observations not used in the clustering solution. } \author{Dan Putler} \seealso{\code{\link[stats]{hclust}}, \code{\link[stats]{cutree}}, \code{\link[stats]{kmeans}}, \code{\link{KMeans}}} \examples{ data(USArrests) USArrkm3 <- KMeans(USArrests[USArrests$UrbanPop<66, ], centers=3) assignCluster(USArrests[USArrests$UrbanPop<66, ], USArrests, USArrkm3$cluster) } \keyword{misc} RcmdrMisc/man/lineplot.Rd0000744000176200001440000000146712560142400015030 0ustar liggesusers\name{lineplot} \alias{lineplot} \title{ Plot a one or more lines. } \description{ This function plots lines for one or more variables against another variable --- typically time series against time. } \usage{ lineplot(x, ..., legend) } \arguments{ \item{x}{variable giving horizontal coordinates.} \item{\dots}{one or more variables giving vertical coordinates.} \item{legend}{plot legend? Default is \code{TRUE} if there is more than one variable to plot and \code{FALSE} is there is just one.} } \value{ Produces a plot; returns \code{NULL} invisibly. } \author{ John Fox \email{jfox@mcmaster.ca} } \examples{ if (require("car")){ data(Bfox) Bfox$time <- as.numeric(rownames(Bfox)) with(Bfox, lineplot(time, menwage, womwage)) } } \keyword{hplot} RcmdrMisc/man/indexplot.Rd0000744000176200001440000000205012560142400015175 0ustar liggesusers\name{indexplot} \alias{indexplot} \title{ Index Plots } \description{ Index plot with point identification. } \usage{ indexplot(x, labels = seq_along(x), id.method = "y", type = "h", id.n = 0, ylab, ...) 
} \arguments{ \item{x}{numeric variable.} \item{labels}{point labels.} \item{id.method}{method for identifying points; see \code{\link[car]{showLabels}}.} \item{type}{to be passed to \code{\link{plot}}.} \item{id.n}{number of points to identify; see \code{\link[car]{showLabels}}.} \item{ylab}{label for vertical axis; if missing, will be constructed from \code{x}.} \item{\dots}{to be passed to \code{plot}.} } \value{ Returns labelled indices of identified points or (invisibly) \code{NULL} if no points are identified. } \author{ John Fox \email{jfox@mcmaster.ca} } \seealso{ \code{\link[car]{showLabels}}, \code{\link{plot.default}} } \examples{ if (require("car")){ data(Prestige) with(Prestige, indexplot(income, id.n=2, labels=rownames(Prestige))) } } \keyword{hplot} RcmdrMisc/man/reliability.Rd0000744000176200001440000000251412560142400015505 0ustar liggesusers\name{reliability} \alias{reliability} \alias{print.reliability} \title{Reliability of a Composite Scale} \description{ Calculates Cronbach's alpha and standardized alpha (lower bounds on reliability) for a composite (summated-rating) scale. Standardized alpha is for the sum of the standardized items. In addition, the function calculates alpha and standardized alpha for the scale with each item deleted in turn, and computes the correlation between each item and the sum of the other items. } \usage{ reliability(S) \method{print}{reliability}(x, digits=4, ...) } \arguments{ \item{S}{the covariance matrix of the items; normally, there should be at least 3 items and certainly no fewer than 2.} \item{x}{reliability object to be printed.} \item{digits}{number of decimal places.} \item{...}{not used: for compatibility with the print generic."} } \value{ an object of class reliability, which normally would be printed. } \references{ N. Cliff (1986) Psychological testing theory. Pp. 343--349 in S. Kotz and N. Johnson, eds., \emph{Encyclopedia of Statistical Sciences, Vol. 7}. Wiley.} \author{John Fox \email{jfox@mcmaster.ca}} \seealso{\code{\link{cov}}} \examples{ if (require(car)){ data(DavisThin) reliability(cov(DavisThin)) } } \keyword{misc} RcmdrMisc/man/mergeRows.Rd0000744000176200001440000000172212560142400015146 0ustar liggesusers\name{mergeRows} \Rdversion{1.1} \alias{mergeRows} \alias{mergeRows.data.frame} \title{ Function to Merge Rows of Two Data Frames. } \description{ This function merges two data frames by combining their rows. } \usage{ mergeRows(X, Y, common.only = FALSE, ...) \method{mergeRows}{data.frame}(X, Y, common.only = FALSE, ...) } \arguments{ \item{X}{First data frame.} \item{Y}{Second data frame.} \item{common.only}{If \code{TRUE}, only variables (columns) common to the two data frame are included in the merged data set; the default is \code{FALSE}.} \item{\dots}{Not used.} } \value{A data frame containing the rows from both input data frames.} \author{John Fox} \seealso{For column merges and more complex merges, see \code{\link[base]{merge}}.} \examples{ if (require(car)){ data(Duncan) D1 <- Duncan[1:20,] D2 <- Duncan[21:45,] D <- mergeRows(D1, D2) print(D) dim(D) } } \keyword{manip} RcmdrMisc/man/numSummary.Rd0000744000176200001440000000414412560142400015352 0ustar liggesusers\name{numSummary} \alias{numSummary} \alias{print.numSummary} \title{Summary Statistics for Numeric Variables} \description{ \code{numSummary} creates neatly formatted tables of means, standard deviations, coefficients of variation, skewness, kurtosis, and quantiles of numeric variables. 
} \usage{ numSummary(data, statistics=c("mean", "sd", "se(mean)", "IQR", "quantiles", "cv", "skewness", "kurtosis"), type=c("2", "1", "3"), quantiles=c(0, .25, .5, .75, 1), groups) \method{print}{numSummary}(x, ...) } \arguments{ \item{data}{a numeric vector, matrix, or data frame.} \item{statistics}{any of \code{"mean"}, \code{"sd"}, \code{"se(mean)"}, \code{"quantiles"}, \code{"cv"} (coefficient of variation --- sd/mean), \code{"skewness"}, or \code{"kurtosis"}, defaulting to \code{c("mean", "sd", "quantiles", "IQR")}.} \item{type}{definition to use in computing skewness and kurtosis; see the \code{\link[e1071]{skewness}} and \code{\link[e1071]{kurtosis}} functions in the \pkg{e1071} package. The default is \code{"2"}.} \item{quantiles}{quantiles to report; default is \code{c(0, 0.25, 0.5, 0.75, 1)}.} \item{groups}{optional variable, typically a factor, to be used to partition the data.} \item{x}{object of class \code{"numSummary"} to print.} \item{\dots}{arguments to pass down from the print method.} } \value{ \code{numSummary} returns an object of class \code{"numSummary"} containing the table of statistics to be reported along with information on missing data, if there are any. } \author{John Fox \email{jfox@mcmaster.ca}} \seealso{\code{\link[base]{mean}}, \code{\link[stats]{sd}}, \code{\link[stats]{quantile}}, \code{\link[e1071]{skewness}}, \code{\link[e1071]{kurtosis}}.} \examples{ if (require("car")){ data(Prestige) Prestige[1, "income"] <- NA print(numSummary(Prestige[,c("income", "education")], statistics=c("mean", "sd", "quantiles", "cv", "skewness", "kurtosis"))) print(numSummary(Prestige[,c("income", "education")], groups=Prestige$type)) remove(Prestige) } } \keyword{misc} RcmdrMisc/man/Hist.Rd0000744000176200001440000000263412560142400014106 0ustar liggesusers\name{Hist} \alias{Hist} \title{Plot a Histogram} \description{ This function is a wrapper for the \code{\link[graphics]{hist}} function in the \code{base} package, permitting percentage scaling of the vertical axis in addition to frequency and density scaling. } \usage{ Hist(x, groups, scale=c("frequency", "percent", "density"), xlab=deparse(substitute(x)), ylab=scale, main="", breaks = "Sturges", ...) } \arguments{ \item{x}{a vector of values for which a histogram is to be plotted.} \item{groups}{a factor to create histograms by group with common horizontal and vertical scales.} \item{scale}{the scaling of the vertical axis: \code{"frequency"} (the default), \code{"percent"}, or \code{"density"}.} \item{xlab}{x-axis label, defaults to name of variable.} \item{ylab}{y-axis label, defaults to value of \code{scale}.} \item{main}{main title for graph, defaults to empty.} \item{breaks}{see the \code{breaks} argument for \code{\link{hist}}.} \item{\dots}{arguments to be passed to \code{hist}.} } \value{ This function returns \code{NULL}, and is called for its side effect --- plotting a histogram. } \author{John Fox \email{jfox@mcmaster.ca}} \seealso{\code{\link[graphics]{hist}}} \examples{ data(Prestige, package="car") Hist(Prestige$income, scale="percent") with(Prestige, Hist(income, groups=type)) } \keyword{hplot} RcmdrMisc/man/partial.cor.Rd0000744000176200001440000000160012560142400015405 0ustar liggesusers\name{partial.cor} \alias{partial.cor} \title{Partial Correlations} \description{ Computes a matrix of partial correlations between each pair of variables controlling for the others. 
} \usage{ partial.cor(X, tests=FALSE, use=c("complete.obs", "pairwise.complete.obs")) } \arguments{ \item{X}{data matrix.} \item{tests}{show two-sided p-value and p-value adjusted for multiple testing by Holm's method for each partial correlation?} \item{use}{observations to use to compute partial correlations, default is \code{"complete.obs"}.} } \value{ Returns the matrix of partial correlations, optionally with adjusted and unadjusted p-values. } \author{John Fox \email{jfox@mcmaster.ca}} \seealso{\code{\link[stats]{cor}}} \examples{ data(DavisThin, package="car") partial.cor(DavisThin) partial.cor(DavisThin, tests=TRUE) } \keyword{misc} RcmdrMisc/man/plotMeans.Rd0000744000176200001440000000371412560142400015141 0ustar liggesusers\name{plotMeans} \alias{plotMeans} \title{Plot Means for One or Two-Way Layout} \description{ Plots cell means for a numeric variable in each category of a factor or in each combination of categories of two factors, optionally along with error bars based on cell standard errors or standard deviations. } \usage{ plotMeans(response, factor1, factor2, error.bars = c("se", "sd", "conf.int", "none"), level=0.95, xlab = deparse(substitute(factor1)), ylab = paste("mean of", deparse(substitute(response))), legend.lab = deparse(substitute(factor2)), main = "Plot of Means", pch = 1:n.levs.2, lty = 1:n.levs.2, col = palette(), ...) } \arguments{ \item{response}{Numeric variable for which means are to be computed.} \item{factor1}{Factor defining horizontal axis of the plot.} \item{factor2}{If present, factor defining profiles of means} \item{error.bars}{If \code{"se"}, the default, error bars around means give plus or minus one standard error of the mean; if \code{"sd"}, error bars give plus or minus one standard deviation; if \code{"conf.int"}, error bars give a confidence interval around each mean; if \code{"none"}, error bars are suppressed.} \item{level}{level of confidence for confidence intervals; default is .95} \item{xlab}{Label for horizontal axis.} \item{ylab}{Label for vertical axis.} \item{legend.lab}{Label for legend.} \item{main}{Label for the graph.} \item{pch}{Plotting characters for profiles of means.} \item{lty}{Line types for profiles of means.} \item{col}{Colours for profiles of means} \item{\ldots}{arguments to be passed to \code{plot}.} } \value{ The function invisibly returns \code{NULL}. } \examples{ if (require(car)){ data(Moore) with(Moore, plotMeans(conformity, fcategory, partner.status, ylim=c(0, 25))) } } \author{John Fox \email{jfox@mcmaster.ca}} \seealso{\code{\link[stats]{interaction.plot}}} \keyword{hplot}