## ica/NAMESPACE
exportPattern("^[[:alpha:]]+")
importFrom("graphics", "par", "plot")
importFrom("stats", "dexp", "dnorm", "dt", "rexp", "rnorm", "rt", "runif")

## ica/R/acy.R
acy <- function(X,Y){
  ###### Amari-Cichocki-Yang Error
  ###### Nathaniel E. Helwig (helwig@umn.edu)
  ###### Last modified: August 23, 2015
  
  X <- as.matrix(X)
  Y <- as.matrix(Y)
  xd <- dim(X)
  if(xd[1] != xd[2]){ stop("X must be square matrix.") }
  yd <- dim(Y)
  if(yd[1] != yd[2]){ stop("Y must be square matrix.") }
  if(xd[1] != yd[1]){ stop("X and Y must be same dimension.") }
  
  A <- abs(solve(Y)%*%X)
  rowprt <- sum( (rowSums(A)/apply(A,1,max)) - 1 )
  colprt <- sum( (colSums(A)/apply(A,2,max)) - 1 )
  (rowprt + colprt) / (2*xd[1])
}

## ica/R/sdiag.R
# "safe" diag: a length-1 input becomes a 1-by-1 matrix
# (plain diag(x) would instead return an x-by-x identity matrix)
sdiag <- function(x){
  if(length(x)<2L) matrix(x) else diag(x)
}

## ica/R/icaplot.R
icaplot <- function(xseq = seq(-2,2,length.out=500),
                    xlab = "", ylab = "", lty = 1,
                    lwd = 1, col = "black", ...){
  
  if(length(lty)!=18L){ lty <- rep(lty[1],18) }
  if(length(lwd)!=18L){ lwd <- rep(lwd[1],18) }
  if(length(col)!=18L){ col <- rep(col[1],18) }
  xlim <- range(xseq)
  par(mfrow=c(6,3))
  for(i in 1:18){
    myfun <- as.character(letters[i])
    kurto <- icasamp(myfun,"kur")
    myden <- icasamp(myfun,"pdf",data=xseq)
    tit1p <- bquote("("*.(myfun)*")")
    tit2p <- bquote(k==.(round(kurto,2)))
    mytit <- bquote(.(tit1p)*" "*.(tit2p))
    plot(xseq,myden,type="l",ylim=c(0,max(myden)+.1),
         xlab=xlab,ylab=ylab,main=mytit,lty=lty[i],
         lwd=lwd[i],col=col[i],...)
  }
}

## ica/R/icafast.R
icafast <- function(X, nc, center = TRUE, maxit = 100, tol = 1e-6,
                    Rmat = diag(nc), alg = c("par", "def"),
                    fun = c("logcosh", "exp", "kur"), alpha = 1){
  ###### Fast Independent Component Analysis
  ###### Nathaniel E. Helwig (helwig@umn.edu)
  ###### Last modified: May 23, 2018
  
  ### initial checks
  X <- as.matrix(X)
  nobs <- nrow(X)
  nvar <- ncol(X)
  nc <- as.integer(nc[1])
  if(nc<1){ stop("Must set nc>=1 component.") }
  maxit <- as.integer(maxit[1])
  if(maxit<1){ stop("Must set maxit>=1 iteration.") }
  tol <- tol[1]
  if(tol<=0){ stop("Must set tol>0.") }
  if(nc>min(nobs,nvar)){ stop("Too many components. Set nc<=min(dim(X)).") }
  alpha <- alpha[1]
  if(alpha<1 | alpha>2){ stop("Must set 'alpha' between 1 and 2.") }
  if(nrow(Rmat)!=nc | ncol(Rmat)!=nc){ stop("Input 'Rmat' must be nc-by-nc rotation matrix.") }
  
  ### center and whiten
  if(center) X <- scale(X,scale=FALSE)
  xeig <- eigen(crossprod(X)/nobs,symmetric=TRUE)
  nze <- sum(xeig$val>xeig$val[1]*.Machine$double.eps)
  ## ... (rank check on nze, whitening matrix Q, whitened data Y <- tcrossprod(X,Q),
  ## ...  the fixed-point updates -- while(vtol>tol && iter<maxit){ ... } -- for
  ## ...  alg "par" or "def", and assembly of the output list)
}

## ica/R/icasamp.R
## NOTE: the argument defaults below are inferred from how icasamp() is called
## in icaplot() and in the package examples
icasamp <- function(dname, query = c("rnd","pdf","kur"), nsamp = NULL, data = NULL){
  ## ... (distributions "a" and "b", and the "rnd"/"pdf" branches of "c")
  if(dname=="c"){
    # Uniform on (-sqrt(3), sqrt(3))
    if(query=="kur"){ return(-1.2) }
  } else if(dname=="d"){
    # Student t with df=5
    if(query=="rnd"){
      return(rt(nsamp,5))
    } else if(query=="pdf"){
      return(dt(data,5))
    } else if(query=="kur"){
      return(6)
    }
  } else if(dname=="e"){
    # Exponential
    if(query=="rnd"){
      return(-1+rexp(nsamp))
    } else if(query=="pdf"){
      return(dexp(data+1))
    } else if(query=="kur"){
      return(6)
    }
  } else if(dname=="f"){
    # Mixture 2 Double Exponential
    prop <- rep(0.5,2)
    mus <- c(-1,1)
    covs <- rep(0.5,2)
    if(query=="rnd"){
      idx <- sample(1:2,nsamp,replace=TRUE)
      return(sign(runif(nsamp)-0.5)*rexp(nsamp,sqrt(2))*covs[idx]+mus[idx])
    } else if(query=="pdf"){
      myden <- 0
      for(i in 1:2){ myden <- myden + prop[i]/covs[i]*exp(-sqrt(2)*abs(data-mus[i])/covs[i])/sqrt(2) }
      return(myden)
    } else if(query=="kur"){
      mus <- mus*covs
      mu <- x2 <- x4 <- x3 <- 0
      for(i in 1:length(prop)){
        mu <- mu + prop[i]*mus[i]
        x2 <- x2 + prop[i]*(mus[i]^2+covs[i])
        x3 <- x3 + prop[i]*(3*mus[i]*covs[i]+mus[i]^3)
        x4 <- x4 + prop[i]*(6*covs[i]^2+6*covs[i]*mus[i]^2+mus[i]^4)
      }
      return((x4-4*mu*x3+6*mu^2*x2-3*mu^4)/(x2-mu^2)^2-3)
    }
  } else if(dname=="g"){
    # Mixture 2 Gaussian (symmetric & multimodal)
    prop <- rep(0.5,2)
    mus <- c(-0.5,0.5)
    covs <- rep(.15,2)
    if(query=="rnd"){
      idx <- sample(1:2,nsamp,replace=TRUE)
      return(rnorm(nsamp,mus[idx],covs[idx]))
    } else if(query=="pdf"){
      myden <- 0
      for(i in 1:2){ myden <- myden + prop[i]*dnorm(data,mus[i],covs[i]) }
      return(myden)
    } else if(query=="kur"){
      mu <- x2 <- x4 <- x3 <- 0
      for(i in 1:length(prop)){
        mu <- mu + prop[i]*mus[i]
        x2 <- x2 + prop[i]*(mus[i]^2+covs[i]^2)
        x3 <- x3 + prop[i]*(3*mus[i]*covs[i]^2+mus[i]^3)
        x4 <- x4 + prop[i]*(3*covs[i]^4+6*covs[i]^2*mus[i]^2+mus[i]^4)
      }
      return((x4-4*mu*x3+6*mu^2*x2-3*mu^4)/(x2-mu^2)^2-3)
    }
  } else if(dname=="h"){
    # Mixture 2 Gaussian (symmetric & transitional)
    prop <- rep(0.5,2)
    mus <- c(-0.5,0.5)
    covs <- rep(0.4,2)
    if(query=="rnd"){
      idx <- sample(1:2,nsamp,replace=TRUE)
      return(rnorm(nsamp,mus[idx],covs[idx]))
    } else if(query=="pdf"){
      myden <- 0
      for(i in 1:2){ myden <- myden + prop[i]*dnorm(data,mus[i],covs[i]) }
      return(myden)
    } else if(query=="kur"){
      mu <- x2 <- x4 <- x3 <- 0
      for(i in 1:length(prop)){
        mu <- mu + prop[i]*mus[i]
        x2 <- x2 + prop[i]*(mus[i]^2+covs[i]^2)
        x3 <- x3 + prop[i]*(3*mus[i]*covs[i]^2+mus[i]^3)
        x4 <- x4 + prop[i]*(3*covs[i]^4+6*covs[i]^2*mus[i]^2+mus[i]^4)
      }
      return((x4-4*mu*x3+6*mu^2*x2-3*mu^4)/(x2-mu^2)^2-3)
    }
  } else if(dname=="i"){
    # Mixture 2 Gaussian (symmetric & unimodal)
    prop <- rep(0.5,2)
    mus <- c(-0.5,0.5)
    covs <- rep(0.5,2)
    if(query=="rnd"){
      idx <- sample(1:2,nsamp,replace=TRUE)
      return(rnorm(nsamp,mus[idx],covs[idx]))
    } else if(query=="pdf"){
      myden <- 0
      for(i in 1:2){ myden <- myden + prop[i]*dnorm(data,mus[i],covs[i]) }
      return(myden)
    } else if(query=="kur"){
      mu <- x2 <- x4 <- x3 <- 0
      for(i in 1:length(prop)){
        mu <- mu + prop[i]*mus[i]
        x2 <- x2 + prop[i]*(mus[i]^2+covs[i]^2)
        x3 <- x3 + prop[i]*(3*mus[i]*covs[i]^2+mus[i]^3)
        x4 <- x4 + prop[i]*(3*covs[i]^4+6*covs[i]^2*mus[i]^2+mus[i]^4)
      }
      return((x4-4*mu*x3+6*mu^2*x2-3*mu^4)/(x2-mu^2)^2-3)
    }
  } else if(dname=="j"){
    # Mixture 2 Gaussian (nonsymmetric & multimodal)
    prop <- c(1,3)
    prop <- prop/sum(prop)
    mus <- c(-0.5,0.5)
    covs <- rep(0.15,2)
    if(query=="rnd"){
      idx <- sample(1:2,nsamp,replace=TRUE,prob=prop)
      return(rnorm(nsamp,mus[idx],covs[idx]))
    } else if(query=="pdf"){
      myden <- 0
      for(i in 1:2){ myden <- myden + prop[i]*dnorm(data,mus[i],covs[i]) }
      return(myden)
    } else if(query=="kur"){
      mu <- x2 <- x4 <- x3 <- 0
      for(i in 1:length(prop)){
        mu <- mu + prop[i]*mus[i]
        x2 <- x2 + prop[i]*(mus[i]^2+covs[i]^2)
        x3 <- x3 + prop[i]*(3*mus[i]*covs[i]^2+mus[i]^3)
        x4 <- x4 + prop[i]*(3*covs[i]^4+6*covs[i]^2*mus[i]^2+mus[i]^4)
      }
      return((x4-4*mu*x3+6*mu^2*x2-3*mu^4)/(x2-mu^2)^2-3)
    }
  } else if(dname=="k"){
    # Mixture 2 Gaussian (nonsymmetric & transitional)
    prop <- c(1,2)
    prop <- prop/sum(prop)
    mus <- c(-0.7,0.5)
    covs <- rep(0.4,2)
    if(query=="rnd"){
      idx <- sample(1:2,nsamp,replace=TRUE,prob=prop)
      return(rnorm(nsamp,mus[idx],covs[idx]))
    } else if(query=="pdf"){
      myden <- 0
      for(i in 1:2){ myden <- myden + prop[i]*dnorm(data,mus[i],covs[i]) }
      return(myden)
    } else if(query=="kur"){
      mu <- x2 <- x4 <- x3 <- 0
      for(i in 1:length(prop)){
        mu <- mu + prop[i]*mus[i]
        x2 <- x2 + prop[i]*(mus[i]^2+covs[i]^2)
        x3 <- x3 + prop[i]*(3*mus[i]*covs[i]^2+mus[i]^3)
        x4 <- x4 + prop[i]*(3*covs[i]^4+6*covs[i]^2*mus[i]^2+mus[i]^4)
      }
      return((x4-4*mu*x3+6*mu^2*x2-3*mu^4)/(x2-mu^2)^2-3)
    }
  } else if(dname=="l"){
    # Mixture 2 Gaussian (nonsymmetric & unimodal)
    prop <- c(1,2)
    prop <- prop/sum(prop)
    mus <- c(-0.7,0.5)
    covs <- rep(0.5,2)
    if(query=="rnd"){
      idx <- sample(1:2,nsamp,replace=TRUE,prob=prop)
      return(rnorm(nsamp,mus[idx],covs[idx]))
    } else if(query=="pdf"){
      myden <- 0
      for(i in 1:2){ myden <- myden + prop[i]*dnorm(data,mus[i],covs[i]) }
      return(myden)
    } else if(query=="kur"){
      mu <- x2 <- x4 <- x3 <- 0
      for(i in 1:length(prop)){
        mu <- mu + prop[i]*mus[i]
        x2 <- x2 + prop[i]*(mus[i]^2+covs[i]^2)
        x3 <- x3 + prop[i]*(3*mus[i]*covs[i]^2+mus[i]^3)
        x4 <- x4 + prop[i]*(3*covs[i]^4+6*covs[i]^2*mus[i]^2+mus[i]^4)
      }
      return((x4-4*mu*x3+6*mu^2*x2-3*mu^4)/(x2-mu^2)^2-3)
    }
  } else if(dname=="m"){
    # Mixture 4 Gaussian (symmetric & multimodal)
    prop <- c(1,2,2,1)
    prop <- prop/sum(prop)
    mus <- c(-1,-.33,.33,1)
    covs <- rep(0.16,4)
    if(query=="rnd"){
      idx <- sample(1:4,nsamp,replace=TRUE,prob=prop)
      return(rnorm(nsamp,mus[idx],covs[idx]))
    } else if(query=="pdf"){
      myden <- 0
      for(i in 1:4){ myden <- myden + prop[i]*dnorm(data,mus[i],covs[i]) }
      return(myden)
    } else if(query=="kur"){
      mu <- x2 <- x4 <- x3 <- 0
      for(i in 1:length(prop)){
        mu <- mu + prop[i]*mus[i]
        x2 <- x2 + prop[i]*(mus[i]^2+covs[i]^2)
        x3 <- x3 + prop[i]*(3*mus[i]*covs[i]^2+mus[i]^3)
        x4 <- x4 + prop[i]*(3*covs[i]^4+6*covs[i]^2*mus[i]^2+mus[i]^4)
      }
      return((x4-4*mu*x3+6*mu^2*x2-3*mu^4)/(x2-mu^2)^2-3)
    }
  } else if(dname=="n"){
    # Mixture 4 Gaussian (symmetric & transitional)
    prop <- c(1,2,2,1)
    prop <- prop/sum(prop)
    mus <- c(-1,-.2,.2,1)
    covs <- c(.2,.3,.3,.2)
    if(query=="rnd"){
      idx <- sample(1:4,nsamp,replace=TRUE,prob=prop)
      return(rnorm(nsamp,mus[idx],covs[idx]))
    } else if(query=="pdf"){
      myden <- 0
      for(i in 1:4){ myden <- myden + prop[i]*dnorm(data,mus[i],covs[i]) }
      return(myden)
    } else if(query=="kur"){
      mu <- x2 <- x4 <- x3 <- 0
      for(i in 1:length(prop)){
        mu <- mu + prop[i]*mus[i]
        x2 <- x2 + prop[i]*(mus[i]^2+covs[i]^2)
        x3 <- x3 + prop[i]*(3*mus[i]*covs[i]^2+mus[i]^3)
        x4 <- x4 + prop[i]*(3*covs[i]^4+6*covs[i]^2*mus[i]^2+mus[i]^4)
      }
      return((x4-4*mu*x3+6*mu^2*x2-3*mu^4)/(x2-mu^2)^2-3)
    }
  } else if(dname=="o"){
    # Mixture 4 Gaussian (symmetric & unimodal)
    prop <- c(1,2,2,1)
    prop <- prop/sum(prop)
    mus <- c(-.7,-.2,.2,.7)
    covs <- c(.2,.3,.3,.2)
    if(query=="rnd"){
      idx <- sample(1:4,nsamp,replace=TRUE,prob=prop)
      return(rnorm(nsamp,mus[idx],covs[idx]))
    } else if(query=="pdf"){
      myden <- 0
      for(i in 1:4){ myden <- myden + prop[i]*dnorm(data,mus[i],covs[i]) }
      return(myden)
    } else if(query=="kur"){
      mu <- x2 <- x4 <- x3 <- 0
      for(i in 1:length(prop)){
        mu <- mu + prop[i]*mus[i]
        x2 <- x2 + prop[i]*(mus[i]^2+covs[i]^2)
        x3 <- x3 + prop[i]*(3*mus[i]*covs[i]^2+mus[i]^3)
        x4 <- x4 + prop[i]*(3*covs[i]^4+6*covs[i]^2*mus[i]^2+mus[i]^4)
      }
      return((x4-4*mu*x3+6*mu^2*x2-3*mu^4)/(x2-mu^2)^2-3)
    }
  } else if(dname=="p"){
    # Mixture 4 Gaussian (nonsymmetric & multimodal)
    prop <- c(1,1,2,1)
    prop <- prop/sum(prop)
    mus <- c(-1,.3,-.3,1.1)
    covs <- c(.2,.2,.2,.2)
    if(query=="rnd"){
      idx <- sample(1:4,nsamp,replace=TRUE,prob=prop)
      return(rnorm(nsamp,mus[idx],covs[idx]))
    } else if(query=="pdf"){
      myden <- 0
      for(i in 1:4){ myden <- myden + prop[i]*dnorm(data,mus[i],covs[i]) }
      return(myden)
    } else if(query=="kur"){
      mu <- x2 <- x4 <- x3 <- 0
      for(i in 1:length(prop)){
        mu <- mu + prop[i]*mus[i]
        x2 <- x2 + prop[i]*(mus[i]^2+covs[i]^2)
        x3 <- x3 + prop[i]*(3*mus[i]*covs[i]^2+mus[i]^3)
        x4 <- x4 + prop[i]*(3*covs[i]^4+6*covs[i]^2*mus[i]^2+mus[i]^4)
      }
      return((x4-4*mu*x3+6*mu^2*x2-3*mu^4)/(x2-mu^2)^2-3)
    }
  } else if(dname=="q"){
    # Mixture 4 Gaussian (nonsymmetric & transitional)
    prop <- c(1,3,2,.5)
    prop <- prop/sum(prop)
    mus <- c(-1,-.2,.3,1)
    covs <- c(.2,.3,.2,.2)
    if(query=="rnd"){
      idx <- sample(1:4,nsamp,replace=TRUE,prob=prop)
      return(rnorm(nsamp,mus[idx],covs[idx]))
    } else if(query=="pdf"){
      myden <- 0
      for(i in 1:4){ myden <- myden + prop[i]*dnorm(data,mus[i],covs[i]) }
      return(myden)
    } else if(query=="kur"){
      mu <- x2 <- x4 <- x3 <- 0
      for(i in 1:length(prop)){
        mu <- mu + prop[i]*mus[i]
        x2 <- x2 + prop[i]*(mus[i]^2+covs[i]^2)
        x3 <- x3 + prop[i]*(3*mus[i]*covs[i]^2+mus[i]^3)
        x4 <- x4 + prop[i]*(3*covs[i]^4+6*covs[i]^2*mus[i]^2+mus[i]^4)
      }
      return((x4-4*mu*x3+6*mu^2*x2-3*mu^4)/(x2-mu^2)^2-3)
    }
  } else if(dname=="r"){
    # Mixture 4 Gaussian (nonsymmetric & unimodal)
    prop <- c(1,2,2,1)
    prop <- prop/sum(prop)
    mus <- c(-.8,-.2,.2,.5)
    covs <- c(.22,.3,.3,.2)
    if(query=="rnd"){
      idx <- sample(1:4,nsamp,replace=TRUE,prob=prop)
      return(rnorm(nsamp,mus[idx],covs[idx]))
    } else if(query=="pdf"){
      myden <- 0
      for(i in 1:4){ myden <- myden + prop[i]*dnorm(data,mus[i],covs[i]) }
      return(myden)
    } else if(query=="kur"){
      mu <- x2 <- x4 <- x3 <- 0
      for(i in 1:length(prop)){
        mu <- mu + prop[i]*mus[i]
        x2 <- x2 + prop[i]*(mus[i]^2+covs[i]^2)
        x3 <- x3 + prop[i]*(3*mus[i]*covs[i]^2+mus[i]^3)
        x4 <- x4 + prop[i]*(3*covs[i]^4+6*covs[i]^2*mus[i]^2+mus[i]^4)
      }
      return((x4-4*mu*x3+6*mu^2*x2-3*mu^4)/(x2-mu^2)^2-3)
    }
  } # end if(dname=="a")
}

## ica/R/icajade.R
icajade <- function(X, nc, center = TRUE, maxit = 100, tol = 1e-6, Rmat = diag(nc)){
  ###### Joint Approximate Diagonalization of Eigenmatrices (JADE)
  ###### Nathaniel E. Helwig (helwig@umn.edu)
  ###### Last modified: May 23, 2018
  
  ### initial checks
  X <- as.matrix(X)
  nobs <- nrow(X)
  nvar <- ncol(X)
  nc <- as.integer(nc[1])
  if(nc<1){ stop("Must set nc>=1 component.") }
  maxit <- as.integer(maxit[1])
  if(maxit<1){ stop("Must set maxit>=1 iteration.") }
  tol <- tol[1]
  if(tol<=0){ stop("Must set tol>0.") }
  if(nc>min(nobs,nvar)){ stop("Too many components. Set nc<=min(dim(X)).") }
  if(nrow(Rmat)!=nc | ncol(Rmat)!=nc){ stop("Input 'Rmat' must be nc-by-nc rotation matrix.") }
  
  ### center and whiten
  if(center) X <- scale(X,scale=FALSE)
  xeig <- eigen(crossprod(X)/nobs,symmetric=TRUE)
  nze <- sum(xeig$val>xeig$val[1]*.Machine$double.eps)
  ## ... (rank check on nze, whitening matrix Q, and whitened data Xw)
  
  ### cumulant matrices
  ## ... (allocation of idmat, emats, and the column index crng, plus the
  ## ...  diagonal blocks for each component)
  for(i in 1:nc){
    Xi <- Xw[,i]
    ## ... (diagonal block for component i)
    if(i>1){
      for(j in 1:(i-1)){
        Xj <- Xw[,j]
        Qij <- crossprod(matrix(Xi*Xj/nobs,nobs,nc)*Xw,Xw) -
          tcrossprod(idmat[,i],idmat[,j]) - tcrossprod(idmat[,j],idmat[,i])
        emats[,crng] <- sqrt(2)*Qij
        crng <- crng + nc
      } # end for(j in 1:(i-1))
    } # end if(i>1)
  } # end for(i in 1:nc)
  
  ### iterative rotation
  npairs <- nc*(nc-1)/2
  thetas <- rep(1,npairs)
  iter <- 0
  vtol <- 1
  while(vtol>tol && iter<maxit){
    ## ... (Jacobi/Givens rotation sweep updating Rmat and the convergence
    ## ...  criterion vtol, incrementing iter)
  }
  ## ... (rotation and sorting of the components, and assembly of the output
  ## ...  list(S, M, W, Y, Q, R, vafs, iter))
}

## ica/R/icaimax.R
## NOTE: the signature below is restored from the \usage section of man/icaimax.Rd
icaimax <- function(X, nc, center = TRUE, maxit = 100, tol = 1e-6,
                    Rmat = diag(nc), alg = c("newton", "gradient"),
                    fun = c("tanh", "log", "ext"), signs = rep(1, nc),
                    signswitch = TRUE, rate = 1, rateanneal = NULL){
  ###### Information-Maximization (Infomax) ICA
  ###### Nathaniel E. Helwig (helwig@umn.edu)
  
  ### initial checks
  X <- as.matrix(X)
  nobs <- nrow(X)
  nvar <- ncol(X)
  nc <- as.integer(nc[1])
  if(nc<1){ stop("Must set nc>=1 component.") }
  maxit <- as.integer(maxit[1])
  if(maxit<1){ stop("Must set maxit>=1 iteration.") }
  tol <- tol[1]
  if(tol<=0){ stop("Must set tol>0.") }
  if(nc>min(nobs,nvar)){ stop("Too many components. Set nc<=min(dim(X)).") }
  if(nrow(Rmat)!=nc | ncol(Rmat)!=nc){ stop("Input 'Rmat' must be nc-by-nc rotation matrix.") }
  fun <- fun[1]
  if(fun=="ext"){
    signs <- sign(signs)
    if(length(signs)!=nc){ stop("Input 'signs' must have length equal to 'nc' input.") }
  } else {
    signs <- NA
    signswitch <- FALSE
  }
  alg <- alg[1]
  if(alg=="gradient"){
    rate <- rate[1]
    if(rate<=0){ stop("Must set 'rate' greater than 0.") }
    if(!is.null(rateanneal[1])){
      if(length(rateanneal)!=2L){ stop("Input 'rateanneal' should be two-element vector.") }
      if(rateanneal[1]<=0 || rateanneal[1]>=90){ stop("Input 'rateanneal[1]' should be in range (0,90).") }
      if(rateanneal[2]<=0 || rateanneal[2]>1){ stop("Input 'rateanneal[2]' should be in range (0,1].") }
      ralog <- TRUE
    } else {
      ralog <- FALSE
    }
  }
  
  ### center and whiten
  if(center) X <- scale(X,scale=FALSE)
  xeig <- eigen(crossprod(X)/nobs,symmetric=TRUE)
  nze <- sum(xeig$val>xeig$val[1]*.Machine$double.eps)
  ## ... (rank check on nze, whitening matrix Q, whitened data Y, the Infomax
  ## ...  updates -- Newton or gradient descent, while(vtol>tol && iter<maxit){ ... } --
  ## ...  and assembly of the output list(S, M, W, Y, Q, R, vafs, iter, alg, fun,
  ## ...  signs, rate))
}

% ica/man/icafast.Rd
% NOTE: the sections from \name through \author below are restored from the
% function definition in R/icafast.R and the parallel help files (icajade.Rd, icaimax.Rd)
\name{icafast}
\alias{icafast}
\title{
ICA via FastICA Algorithm
}
\description{
Computes ICA decomposition using Hyvarinen's (1999) FastICA algorithm with various options.
}
\usage{
icafast(X, nc, center = TRUE, maxit = 100, tol = 1e-6,
        Rmat = diag(nc), alg = c("par", "def"),
        fun = c("logcosh", "exp", "kur"), alpha = 1)
}
\arguments{
  \item{X}{Data matrix with \code{n} rows (samples) and \code{p} columns (variables).}
  \item{nc}{Number of components to extract.}
  \item{center}{If \code{TRUE}, columns of \code{X} are mean-centered before ICA decomposition.}
  \item{maxit}{Maximum number of algorithm iterations to allow.}
  \item{tol}{Convergence tolerance.}
  \item{Rmat}{Initial estimate of the \code{nc}-by-\code{nc} orthogonal rotation matrix.}
  \item{alg}{Algorithm to use: \code{alg="par"} updates all components in parallel; \code{alg="def"} estimates components one at a time (deflation).}
  \item{fun}{Contrast function for the negentropy approximation: \code{fun="logcosh"}, \code{fun="exp"}, or \code{fun="kur"}.}
  \item{alpha}{Tuning parameter for the "logcosh" contrast function; must be between 1 and 2.}
}
\value{
\item{S}{Matrix of source signal estimates (\code{S=Y\%*\%R}).}
\item{M}{Estimated mixing matrix.}
\item{W}{Estimated unmixing matrix (\code{W=crossprod(R,Q)}).}
\item{Y}{Whitened data matrix.}
\item{Q}{Whitening matrix.}
\item{R}{Orthogonal rotation matrix.}
\item{vafs}{Variance-accounted-for by each component.}
\item{iter}{Number of algorithm iterations.}
\item{alg}{Algorithm used (same as input).}
\item{fun}{Contrast function (same as input).}
\item{alpha}{Tuning parameter (same as input).}
}
\references{
Hyvarinen, A. (1999). Fast and robust fixed-point algorithms for independent component analysis. \emph{IEEE Transactions on Neural Networks, 10}, 626-634.
}
\author{
Nathaniel E. Helwig <helwig@umn.edu>
}
\details{
\bold{ICA Model}

The ICA model can be written as \code{X=tcrossprod(S,M)+E}, where columns of \code{S} contain the source signals, \code{M} is the mixing matrix, and columns of \code{E} contain the noise signals. Columns of \code{X} are assumed to have zero mean. The goal is to find the unmixing matrix \code{W} such that columns of \code{S=tcrossprod(X,W)} are as independent as possible.

\bold{Whitening}

Without loss of generality, we can write \code{M=P\%*\%R} where \code{P} is a tall matrix and \code{R} is an orthogonal rotation matrix. Letting \code{Q} denote the pseudoinverse of \code{P}, we can whiten the data using \code{Y=tcrossprod(X,Q)}.
The goal is to find the orthogonal rotation matrix \code{R} such that the source signal estimates \code{S=Y\%*\%R} are as independent as possible. Note that \code{W=crossprod(R,Q)}.

\bold{FastICA}

The FastICA algorithm finds the orthogonal rotation matrix \code{R} that (approximately) maximizes the negentropy of the estimated source signals. Negentropy is approximated using
\deqn{J(s) = [E\{G(s)\} - E\{G(z)\}]^2}
where \emph{E} denotes the expectation, \emph{G} is the contrast function, and \emph{z} is a standard normal variable. See Hyvarinen (1999) for specifics of the fixed-point algorithm.
}
\examples{
##########   EXAMPLE 1   ##########

# generate noiseless data (p==r)
set.seed(123)
nobs <- 1000
Amat <- cbind(icasamp("a","rnd",nobs),icasamp("b","rnd",nobs))
Bmat <- matrix(2*runif(4),2,2)
Xmat <- tcrossprod(Amat,Bmat)

# ICA via FastICA with 2 components
imod <- icafast(Xmat,2)
acy(Bmat,imod$M)
cor(Amat,imod$S)


##########   EXAMPLE 2   ##########

# generate noiseless data (p!=r)
set.seed(123)
nobs <- 1000
Amat <- cbind(icasamp("a","rnd",nobs),icasamp("b","rnd",nobs))
Bmat <- matrix(2*runif(200),100,2)
Xmat <- tcrossprod(Amat,Bmat)

# ICA via FastICA with 2 components
imod <- icafast(Xmat,2)
cor(Amat,imod$S)


##########   EXAMPLE 3   ##########

# generate noisy data (p!=r)
set.seed(123)
nobs <- 1000
Amat <- cbind(icasamp("a","rnd",nobs),icasamp("b","rnd",nobs))
Bmat <- matrix(2*runif(200),100,2)
Emat <- matrix(rnorm(10^5),1000,100)
Xmat <- tcrossprod(Amat,Bmat)+Emat

# ICA via FastICA with 2 components
imod <- icafast(Xmat,2)
cor(Amat,imod$S)
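
# a quick numerical check (sketch) of the relations stated in Details,
# using the noisy fit 'imod' from Example 3: W = crossprod(R,Q) and
# S = tcrossprod(Xc,W) = Y \%*\% R, where Xc is the centered data
Xc <- scale(Xmat, scale=FALSE)
max(abs(imod$W - crossprod(imod$R, imod$Q)))
max(abs(imod$S - tcrossprod(Xc, imod$W)))
max(abs(imod$S - imod$Y \%*\% imod$R))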
}

% ica/man/icajade.Rd
\name{icajade}
\alias{icajade}
\title{
ICA via JADE Algorithm
}
\description{
Computes ICA decomposition using Cardoso and Souloumiac's (1993, 1996) Joint Approximate Diagonalization of Eigenmatrices (JADE) approach.
}
\usage{
icajade(X, nc, center = TRUE, maxit = 100, tol = 1e-6, Rmat = diag(nc))
}
\arguments{
  \item{X}{Data matrix with \code{n} rows (samples) and \code{p} columns (variables).}
  \item{nc}{Number of components to extract.}
  \item{center}{If \code{TRUE}, columns of \code{X} are mean-centered before ICA decomposition.}
  \item{maxit}{Maximum number of algorithm iterations to allow.}
  \item{tol}{Convergence tolerance.}
  \item{Rmat}{Initial estimate of the \code{nc}-by-\code{nc} orthogonal rotation matrix.}
}
\value{
\item{S}{Matrix of source signal estimates (\code{S=Y\%*\%R}).}
\item{M}{Estimated mixing matrix.}
\item{W}{Estimated unmixing matrix (\code{W=crossprod(R,Q)}).}
\item{Y}{Whitened data matrix.}
\item{Q}{Whitening matrix.}
\item{R}{Orthogonal rotation matrix.}
\item{vafs}{Variance-accounted-for by each component.}
\item{iter}{Number of algorithm iterations.}
}
\references{
Cardoso, J.F., & Souloumiac, A. (1993). Blind beamforming for non-Gaussian signals. \emph{IEE Proceedings-F, 140}, 362-370.

Cardoso, J.F., & Souloumiac, A. (1996). Jacobi angles for simultaneous diagonalization. \emph{SIAM Journal on Matrix Analysis and Applications, 17}, 161-164.

Helwig, N.E. & Hong, S. (2013). A critique of Tensor Probabilistic Independent Component Analysis: Implications and recommendations for multi-subject fMRI data analysis. \emph{Journal of Neuroscience Methods, 213}, 263-273.
}
\author{
Nathaniel E. Helwig <helwig@umn.edu>
}
\details{
\bold{ICA Model}

The ICA model can be written as \code{X=tcrossprod(S,M)+E}, where columns of \code{S} contain the source signals, \code{M} is the mixing matrix, and columns of \code{E} contain the noise signals. Columns of \code{X} are assumed to have zero mean. The goal is to find the unmixing matrix \code{W} such that columns of \code{S=tcrossprod(X,W)} are as independent as possible.

\bold{Whitening}

Without loss of generality, we can write \code{M=P\%*\%R} where \code{P} is a tall matrix and \code{R} is an orthogonal rotation matrix. Letting \code{Q} denote the pseudoinverse of \code{P}, we can whiten the data using \code{Y=tcrossprod(X,Q)}. The goal is to find the orthogonal rotation matrix \code{R} such that the source signal estimates \code{S=Y\%*\%R} are as independent as possible. Note that \code{W=crossprod(R,Q)}.

\bold{JADE}

The JADE approach finds the orthogonal rotation matrix \code{R} that (approximately) diagonalizes the cumulant array of the source signals. See Cardoso and Souloumiac (1993, 1996) and Helwig and Hong (2013) for specifics of the JADE algorithm.
}
\examples{
##########   EXAMPLE 1   ##########

# generate noiseless data (p==r)
set.seed(123)
nobs <- 1000
Amat <- cbind(icasamp("a","rnd",nobs),icasamp("b","rnd",nobs))
Bmat <- matrix(2*runif(4),2,2)
Xmat <- tcrossprod(Amat,Bmat)

# ICA via JADE with 2 components
imod <- icajade(Xmat,2)
acy(Bmat,imod$M)
cor(Amat,imod$S)


##########   EXAMPLE 2   ##########

# generate noiseless data (p!=r)
set.seed(123)
nobs <- 1000
Amat <- cbind(icasamp("a","rnd",nobs),icasamp("b","rnd",nobs))
Bmat <- matrix(2*runif(200),100,2)
Xmat <- tcrossprod(Amat,Bmat)

# ICA via JADE with 2 components
imod <- icajade(Xmat,2)
cor(Amat,imod$S)


##########   EXAMPLE 3   ##########

# generate noisy data (p!=r)
set.seed(123)
nobs <- 1000
Amat <- cbind(icasamp("a","rnd",nobs),icasamp("b","rnd",nobs))
Bmat <- matrix(2*runif(200),100,2)
Emat <- matrix(rnorm(10^5),1000,100)
Xmat <- tcrossprod(Amat,Bmat)+Emat

# ICA via JADE with 2 components
imod <- icajade(Xmat,2)
cor(Amat,imod$S)
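
# a small follow-up sketch: on the noisy data from Example 3, the JADE
# source estimates should match the FastICA estimates from icafast()
# up to sign and column order
fmod <- icafast(Xmat,2)
round(abs(cor(imod$S,fmod$S)),2)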
}

% ica/man/acy.Rd
\name{acy}
\alias{acy}
\title{
Amari-Cichocki-Yang Error
}
\description{
The Amari-Cichocki-Yang (ACY) error is an asymmetric measure of dissimilarity between two nonsingular matrices \code{X} and \code{Y}. The ACY error: (a) is invariant to permutation and rescaling of the columns of \code{X} and \code{Y}, (b) ranges between 0 and \code{n-1}, and (c) equals 0 if and only if \code{X} and \code{Y} are identical up to column permutations and rescalings.
}
\usage{
acy(X,Y)
}
\arguments{
  \item{X}{Nonsingular matrix of dimension \eqn{n \times n} (test matrix).}
  \item{Y}{Nonsingular matrix of dimension \eqn{n \times n} (target matrix).}
}
\value{
Returns a scalar (the ACY error).
}
\references{
Amari, S., Cichocki, A., & Yang, H.H. (1996). A new learning algorithm for blind signal separation. In D. S. Touretzky, M. C. Mozer, and M. E. Hasselmo (Eds.), \emph{Advances in Neural Information Processing Systems, 8}. Cambridge, MA: MIT Press.
}
\author{
Nathaniel E. Helwig <helwig@umn.edu>
}
\details{
The ACY error is defined as
\deqn{\frac{1}{2n}\sum_{i=1}^{n}\left(\frac{\sum_{j=1}^{n}|a_{ij}|}{\max_{j}|a_{ij}|}-1\right) + \frac{1}{2n}\sum_{j=1}^{n}\left(\frac{\sum_{i=1}^{n}|a_{ij}|}{\max_{i}|a_{ij}|}-1\right)}
where \eqn{a_{ij} = (\mathbf{Y}^{-1}\mathbf{X})_{ij}}.
}
\section{Warnings}{
If \code{Y} is singular, the function will produce an error.
}
\examples{
##########   EXAMPLE   ##########

set.seed(1)
X <- matrix(runif(16),4,4)
Y <- matrix(runif(16),4,4)
Z <- X[,c(3,1,2,4)]\%*\%diag(1:4)
acy(X,Y)
acy(X,Z)
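
# the measure is asymmetric in its arguments, so swapping the test and
# target matrices generally changes the error (a quick check)
acy(Y,X)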
}

% ica/man/icaimax.Rd
\name{icaimax}
\alias{icaimax}
\title{
ICA via Infomax Algorithm
}
\description{
Computes ICA decomposition using Bell and Sejnowski's (1995) Information-Maximization (Infomax) approach with various options.
}
\usage{
icaimax(X, nc, center = TRUE, maxit = 100, tol = 1e-6,
        Rmat = diag(nc), alg = c("newton", "gradient"),
        fun = c("tanh", "log", "ext"), signs = rep(1, nc),
        signswitch = TRUE, rate = 1, rateanneal = NULL)
}
\arguments{
  \item{X}{Data matrix with \code{n} rows (samples) and \code{p} columns (variables).}
  \item{nc}{Number of components to extract.}
  \item{center}{If \code{TRUE}, columns of \code{X} are mean-centered before ICA decomposition.}
  \item{maxit}{Maximum number of algorithm iterations to allow.}
  \item{tol}{Convergence tolerance.}
  \item{Rmat}{Initial estimate of the \code{nc}-by-\code{nc} orthogonal rotation matrix.}
  \item{alg}{Algorithm to use: \code{alg="newton"} for Newton iteration, and \code{alg="gradient"} for gradient descent.}
  \item{fun}{Nonlinear (squashing) function to use for the algorithm: \code{fun="tanh"} for hyperbolic tangent, \code{fun="log"} for logistic, and \code{fun="ext"} for extended Infomax.}
  \item{signs}{Vector of length \code{nc} such that \code{signs[j]==1} if the j-th component is super-Gaussian and \code{signs[j]==-1} if the j-th component is sub-Gaussian. Only used if \code{fun="ext"}. Ignored if \code{signswitch=TRUE}.}
  \item{signswitch}{If \code{TRUE}, the \code{signs} vector is automatically determined from the data; otherwise a confirmatory ICA decomposition is calculated using the input \code{signs} vector. Only used if \code{fun="ext"}.}
  \item{rate}{Learning rate for the gradient descent algorithm. Ignored if \code{alg="newton"}.}
  \item{rateanneal}{Annealing angle and proportion for the gradient descent learning rate (see Details). Ignored if \code{alg="newton"}.}
}
\value{
\item{S}{Matrix of source signal estimates (\code{S=Y\%*\%R}).}
\item{M}{Estimated mixing matrix.}
\item{W}{Estimated unmixing matrix (\code{W=crossprod(R,Q)}).}
\item{Y}{Whitened data matrix.}
\item{Q}{Whitening matrix.}
\item{R}{Orthogonal rotation matrix.}
\item{vafs}{Variance-accounted-for by each component.}
\item{iter}{Number of algorithm iterations.}
\item{alg}{Algorithm used (same as input).}
\item{fun}{Contrast function (same as input).}
\item{signs}{Component signs (same as input).}
\item{rate}{Learning rate (same as input).}
}
\references{
Bell, A.J. & Sejnowski, T.J. (1995). An information-maximization approach to blind separation and blind deconvolution. \emph{Neural Computation, 7}, 1129-1159.
}
\author{
Nathaniel E. Helwig <helwig@umn.edu>
}
\details{
\bold{ICA Model}

The ICA model can be written as \code{X=tcrossprod(S,M)+E}, where columns of \code{S} contain the source signals, \code{M} is the mixing matrix, and columns of \code{E} contain the noise signals. Columns of \code{X} are assumed to have zero mean. The goal is to find the unmixing matrix \code{W} such that columns of \code{S=tcrossprod(X,W)} are as independent as possible.

\bold{Whitening}

Without loss of generality, we can write \code{M=P\%*\%R} where \code{P} is a tall matrix and \code{R} is an orthogonal rotation matrix. Letting \code{Q} denote the pseudoinverse of \code{P}, we can whiten the data using \code{Y=tcrossprod(X,Q)}. The goal is to find the orthogonal rotation matrix \code{R} such that the source signal estimates \code{S=Y\%*\%R} are as independent as possible. Note that \code{W=crossprod(R,Q)}.

\bold{Infomax}

The Infomax approach finds the orthogonal rotation matrix \code{R} that (approximately) maximizes the joint entropy of a nonlinear function of the estimated source signals. See Bell and Sejnowski (1995) and Helwig (in prep) for specifics of the algorithms.
}
\examples{
##########   EXAMPLE 1   ##########

# generate noiseless data (p==r)
set.seed(123)
nobs <- 1000
Amat <- cbind(icasamp("a","rnd",nobs),icasamp("b","rnd",nobs))
Bmat <- matrix(2*runif(4),2,2)
Xmat <- tcrossprod(Amat,Bmat)

# ICA via Infomax with 2 components
imod <- icaimax(Xmat,2)
acy(Bmat,imod$M)
cor(Amat,imod$S)


##########   EXAMPLE 2   ##########

# generate noiseless data (p!=r)
set.seed(123)
nobs <- 1000
Amat <- cbind(icasamp("a","rnd",nobs),icasamp("b","rnd",nobs))
Bmat <- matrix(2*runif(200),100,2)
Xmat <- tcrossprod(Amat,Bmat)

# ICA via Infomax with 2 components
imod <- icaimax(Xmat,2)
cor(Amat,imod$S)


##########   EXAMPLE 3   ##########

# generate noisy data (p!=r)
set.seed(123)
nobs <- 1000
Amat <- cbind(icasamp("a","rnd",nobs),icasamp("b","rnd",nobs))
Bmat <- matrix(2*runif(200),100,2)
Emat <- matrix(rnorm(10^5),1000,100)
Xmat <- tcrossprod(Amat,Bmat)+Emat

# ICA via Infomax with 2 components
imod <- icaimax(Xmat,2)
cor(Amat,imod$S)
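
# a brief sketch comparing the two algorithm options on the noisy data
# from Example 3: the Newton and gradient descent updates should recover
# essentially the same sources (gradient descent may need more iterations)
gmod <- icaimax(Xmat,2,alg="gradient",maxit=200)
round(abs(cor(imod$S,gmod$S)),2)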
}

% ica/man/ica-package.Rd
\name{ica-package}
\alias{ica-package}
\alias{ica}
\docType{package}
\title{
\packageTitle{ica}
}
\description{
\packageDescription{ica}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{ica}
\packageIndices{ica}
}
\author{
\packageAuthor{ica}

Maintainer: \packageMaintainer{ica}
}
\references{
Amari, S., Cichocki, A., & Yang, H.H. (1996). A new learning algorithm for blind signal separation. In D. S. Touretzky, M. C. Mozer, and M. E. Hasselmo (Eds.), \emph{Advances in Neural Information Processing Systems, 8}. Cambridge, MA: MIT Press.

Bach, F.R. (2002). \emph{kernel-ica}. MATLAB toolbox (ver 1.2) http://www.di.ens.fr/~fbach/kernel-ica/.

Bach, F.R. & Jordan, M.I. (2002). Kernel independent component analysis. \emph{Journal of Machine Learning Research, 3}, 1-48.

Bell, A.J. & Sejnowski, T.J. (1995). An information-maximization approach to blind separation and blind deconvolution. \emph{Neural Computation, 7}, 1129-1159.

Cardoso, J.F., & Souloumiac, A. (1993). Blind beamforming for non-Gaussian signals. \emph{IEE Proceedings-F, 140}, 362-370.

Cardoso, J.F., & Souloumiac, A. (1996). Jacobi angles for simultaneous diagonalization. \emph{SIAM Journal on Matrix Analysis and Applications, 17}, 161-164.

Helwig, N.E. & Hong, S. (2013). A critique of Tensor Probabilistic Independent Component Analysis: Implications and recommendations for multi-subject fMRI data analysis. \emph{Journal of Neuroscience Methods, 213}, 263-273.

Hyvarinen, A. (1999). Fast and robust fixed-point algorithms for independent component analysis. \emph{IEEE Transactions on Neural Networks, 10}, 626-634.

Tucker, L.R. (1951). \emph{A method for synthesis of factor analysis studies} (Personnel Research Section Report No. 984). Washington, DC: Department of the Army.
}
\keyword{ package }
\examples{
# See examples for icafast, icaimax, icajade, and icasamp
}

% ica/man/icaplot.Rd
\name{icaplot}
\alias{icaplot}
\title{
Plot Densities of Source Signal Distributions
}
\description{
Plot the density (pdf) and kurtosis for the 18 source signal distributions used in Bach and Jordan (2002); see \code{\link{icasamp}} for more information.
}
\usage{
icaplot(xseq = seq(-2,2,length.out=500), xlab = "", ylab = "",
        lty = 1, lwd = 1, col = "black", ...)
}
\arguments{
  \item{xseq}{Sequence of ordered data values for plotting the density.}
  \item{xlab}{X-axis label for the plot (default is no label).}
  \item{ylab}{Y-axis label for the plot (default is no label).}
  \item{lty}{Line type for each density (scalar or vector of length 18).}
  \item{lwd}{Line width for each density (scalar or vector of length 18).}
  \item{col}{Line color for each density (scalar or vector of length 18).}
  \item{...}{Optional inputs for \code{plot}.}
}
\value{
Produces a plot with \code{NULL} return value.
}
\references{
Bach, F.R. (2002). \emph{kernel-ica}. MATLAB toolbox (ver 1.2) http://www.di.ens.fr/~fbach/kernel-ica/.

Bach, F.R. & Jordan, M.I. (2002). Kernel independent component analysis. \emph{Journal of Machine Learning Research, 3}, 1-48.
}
\author{
Nathaniel E. Helwig <helwig@umn.edu>
}
\examples{
\dontrun{
##########   EXAMPLE   ##########

quartz(height=9,width=7)
par(mar=c(3,3,3,3))
icaplot()
}
}

% ica/man/ica-internal.Rd
% Part of the R ica package,
% Nathaniel E. Helwig <helwig@umn.edu>

\name{ica-internal}

%% List of Internal Functions Called
\alias{sdiag}

\title{Internal Functions for ica Package}
\description{
Internal functions for the ica package.
}
\details{
These functions are not to be called by the user.
}
\keyword{ internal }