tensorA/0000755000176200001440000000000013755572102011674 5ustar liggesuserstensorA/NAMESPACE0000644000176200001440000000245313755437266013122 0ustar liggesusersimport(stats) exportPattern("^to\\.tensor.*") exportPattern("\\.tensor$") export("norm") export("opnorm") exportPattern("mark.*") export("untensor") #export("slice.tensor<-") exportPattern("as\\..*") exportPattern("^%") exportPattern("contraname") exportPattern("^is\\.") S3method(to.tensor,default) S3method(ftable,tensor) S3method(names,tensor) S3method("names<-",tensor) S3method(dimnames,tensor) S3method("dimnames<-",tensor) S3method(norm,tensor) S3method(opnorm,tensor) S3method(reorder,tensor) S3method(rep,tensor) S3method(solve,tensor) S3method("[[",tensor) S3method("[[<-",tensor) S3method("[",tensor) S3method("%e%",tensor) S3method("%r%",tensor) S3method("+",tensor) S3method("-",tensor) S3method("*",tensor) S3method("/",tensor) S3method(mark,tensor) S3method(mark,numeric) S3method(mark,character) S3method("dim<-",tensor) useDynLib(tensorA,tensoraCmulhelper,tensoramulhelper) S3method("$", tensor) S3method("^", tensor) S3method("|", tensor) S3method(mean, tensor) S3method(as.contravariate, character) S3method(as.covariate, character) S3method(as.tensor, default) S3method(as.tensor, tensor) S3method(is.contravariate, character) S3method(is.contravariate, numeric) S3method(is.contravariate, tensor) S3method(is.covariate, character) S3method(is.covariate, numeric) S3method(is.covariate, tensor) tensorA/man/0000755000176200001440000000000013154210241012431 5ustar liggesuserstensorA/man/addtensor.Rd0000644000176200001440000000257113154210240014707 0ustar liggesusers\name{add.tensor} \alias{add.tensor} \alias{-.tensor} \alias{+.tensor} \alias{*.tensor} \alias{/.tensor} \title{Element-wise arithmetic operations +,-,*,/ with tensors} \description{ Adds/subtracts/multiplies/divides tensors element by element. The advantage over a simple \code{+} is that we do not need to consider the correct permutation of indices or rules on implicit replication, since all of this is handled automatically. } \usage{ add.tensor(X,Y,op="+",only=NULL) ## Methods for class tensor # x + y # x - y # x * y # x / y } \arguments{ \item{X}{a tensor} \item{Y}{a tensor} \item{op}{a binary function used to perform the "addition"} \item{only}{a list of dimnames that may be considered as equal. This parameter is here to allow parallelization of tensors with only partially known structure.} } \value{ A tensor giving the element-wise operation of X and Y. If some of the indices are missing in one of the tensors they are added by repetition. } \details{ The tensors are properly reordered such that dimensions of the same name are identified. If dimensions are missing in one of the tensors, the tensor is repeated correspondingly along them. } \seealso{ \code{\link{to.tensor}} } \examples{ A <- to.tensor(1:20,c(U=2,V=2,W=5)) add.tensor(A,A)/2 -A (A+A)/2 A/A A * 1/A norm.tensor(reorder.tensor(A,c(2,3,1)) - A) } \author{K. Gerald van den Boogaart} \keyword{arith}tensorA/man/bindtensor.Rd0000644000176200001440000000146613154210241015076 0ustar liggesusers\name{bind.tensor} \alias{bind.tensor} \title{A cbind/rbind for tensors} \description{ Tensors can be put side by side in one dimension if they are of equal size in all other dimensions. 
} \usage{ bind.tensor(A,dA=NULL,B,dB=dA) } \arguments{ \item{A}{the first tensor} \item{dA}{the dimension of A to be used for binding the tensors} \item{B}{the second tensor} \item{dB}{the dimension of B to be used for binding the tensors} } \value{ a tensor with the two tensors combined into one } \details{ This function works like a cbind or rbind function for tensors. } \note{ binding does not preserve the sequence of the dimensions. } \seealso{ \code{\link[base]{cbind}} } \examples{ A <- to.tensor(1:6,c(a=2,b=3)) bind.tensor(A,"a",A) bind.tensor(A,"b",A) } \author{K.Gerald van den Boogaart} \keyword{arith}tensorA/man/names.Rd0000644000176200001440000000245313154210240014026 0ustar liggesusers\name{names.tensor} \alias{names.tensor} \alias{names<-.tensor} \alias{dimnames<-.tensor} \alias{dimnames.tensor} \alias{dim<-.tensor} \title{Getting and setting index and dimension names of a tensor} \description{ The names of a tensor are the names of its dimensions } \usage{ \method{names}{tensor}(x) \method{names}{tensor}(x) <- value \method{dimnames}{tensor}(x) \method{dimnames}{tensor}(x) <- value \method{dim}{tensor}(x) <- value } \arguments{ \item{x}{a tensor object} \item{value}{The new value. If this is a named list it replaces the names of the dimensions. If it is an unnamed list it is given the names of the dimensions.} } \value{ the names of the dimensions of the tensor } \details{ The names of the dimensions of the tensor are very relevant in any tensor arithmetic since they are the principal way to specify the dimensions to be involved in an operation. The dimnames function is here only for convenience to guarantee that the names of the dimnames are always the same as the names of the dimensions and to ensure that there is always at least a list with the right length and names. } \seealso{ \code{\link{mul.tensor}} } \examples{ A <- to.tensor(1:20,c(U=2,V=2,W=5)) A dim(A) names(A) names(A) <- c("A","B","C") A dim(A) names(A) } \author{K. Gerald van den Boogaart} \keyword{algebra}tensorA/man/riemann.Rd0000644000176200001440000000447513154210240014362 0ustar liggesusers\name{riemann.tensor} \alias{riemann.tensor} \alias{\%r\%} \alias{\%r\%.tensor} \title{Tensor multiplication with Riemann's convention} \description{ Multiplies tensors, summing over all pairs with one covariate and one contravariate index of the same name according to Riemann's summing convention. } \usage{ riemann.tensor(...,only=NULL,by=NULL) ## Methods for class tensor # x \%r\% y ## Default method # x \%r\% y } \arguments{ \item{\dots}{some tensors, or a renaming code} \item{only}{an optional list of the dimension names to be recognized for duplication to allow parallel processing on lists of tensors} \item{x}{a tensor} \item{y}{a tensor} \item{by}{Riemannian summing is done in parallel in these dimensions.} } \value{ the tensor product of all the tensors along all duplicate dimensions. } \details{ see \code{\link{mul.tensor}} for details on tensor multiplication. In \code{einstein.tensor} complex operations can be performed by commands and renaming codes: The arguments are processed from left to right and multiplied. Unnamed attributes are regarded as tensors or scalars and multiplied with the current result by the Riemann summing convention, which means an inner product over all pairs of covariate and contravariate indices with the same name. 
Named attributes can either have the name \code{diag}, which performs a \code{diagmul} according to the same-name convention or be of the form \code{A="B"} or \code{"A"="B"}, for which we have two cases. Typically both are given covariate. The first specifies the covariate to be used in the multiplication and the second the contravariate. If both names are present in the current result, an inner multiplication (trace) of on these two dimensions is performed. If only the covariate or the contravariate is present up to this point, the specific dimension is renamed to the second name, but keeps its type. This renaming might be visible in the result or inducing a multiplication according to the Riemann convention later if the other shows up. } \seealso{ \code{\link{mul.tensor}}, \code{\link{to.tensor}}, \code{\link{riemann.tensor}} } \examples{ A <- to.tensor(1:20,c(U=2,"^V"=2,W=5)) B <- to.tensor(1:20,c("^U"=2,V=2,Q=5)) riemann.tensor(A,B) A \%r\% B } \author{K. Gerald van den Boogaart} \keyword{arith}tensorA/man/tomatrixtensor.Rd0000644000176200001440000000265413154210240016030 0ustar liggesusers\name{to.matrix.tensor} \alias{to.matrix.tensor} \title{The matrix corresponding to a tensor seen as a linear mapping of tensors.} \description{ A tensor can be seen as a linear mapping of a tensor to a tensor. This function gives the corresponding matrix of the mapping. } \usage{ to.matrix.tensor(X,i,j,by=NULL) } \arguments{ \item{X}{The tensor } \item{i}{The image indices of the linear mapping} \item{j}{The domain indices of the linear mapping} \item{by}{the operation is done in parallel for these dimensions} } \value{ if no \code{by} is given a matrix. Otherwise a tensor of level \code{2+length(dim(X))[by]} giving matrices for each specification of the by dimensions. } \details{ A tensor can be seen as a linear mapping of a tensor to a tensor. This function computes the corresponding matrix, mapping the entries of the domain tensor to the entries of the image tensor. } \seealso{ \code{\link{to.tensor}}, \code{\link{solve.tensor}}, \code{\link{inv.tensor}}, \code{\link{svd.tensor}} } \examples{ A <- reorder.tensor(to.tensor(1:30,c(a=2,b=3,c=5)),c("c","a","b")) to.matrix.tensor(A,"a",c("b","c")) # matrix(1:30,nrow=2) to.matrix.tensor(A,c("a","b"),c("c")) # matrix(1:30,nrow=6) to.matrix.tensor(A,c("a","b"),by=c("c")) # structure(1:30,dim=c(6,1,5))) to.matrix.tensor(A,c("a"),by=c("c")) # structure(1:30,dim=c(2,3,5))) } \author{K. Gerald van den Boogaart} \keyword{arith} tensorA/man/multensor.Rd0000644000176200001440000000335313154210240014753 0ustar liggesusers\name{mul.tensor} \alias{mul.tensor} \title{Tensor multiplication for the tensor class} \description{ Performs a tensor multiplication like tensor(), but with named indices, keeping dimnames, and vectorized. } \usage{ mul.tensor(X,i=c(),Y,j=i,by=NULL) } \arguments{ \item{X}{a tensor to be multiplied} \item{i}{numeric or character vector specifying the dimension to be used in the multiplication for X} \item{Y}{a tensor to be multiplied} \item{j}{numeric or character vector specifying the dimension to be used in the multiplication for Y} \item{by}{the by dimensions if present and not mentioned in i or j are used as sequence dimensions. tensors in these dimensions are processed in parallel. So in this dimension the product is neither inner nor outer but parallel like \code{a*b}, rather than \code{a\%*\%b} or \code{a\%o\%b}. Unmentioned dimensions get an outer product. Mentioned dimensions an inner. 
} } \value{ The tensor product of X and Y with respect to the specified dimensions. } \details{ Say \deqn{X_{i_1\ldots i_n h_1 \ldots h_l}} and \deqn{Y_{j_1\ldots j_n k_1 \ldots k_m}} then the result is: \deqn{E_{h_1\ldots h_l k_1 \ldots k_m}= \sum_{i_1,\ldots,i_n} X_{i_1\ldots i_n h_1 \ldots h_l}Y_{i_1\ldots i_n k_1 \ldots k_m}} where the dimensions \eqn{j_1\ldots j_n} of Y are identified with the dimensions \eqn{i_1\ldots i_n} of X. This is a full outer product when i and j are not given, and a full inner product when i comprises all dimensions of X. } \seealso{ \code{\link{to.tensor}}, \code{\link{\%e\%}}, \code{\link{\%r\%}}, \code{\link{diagmul.tensor}}, \code{\link{einstein.tensor}}, \code{\link{riemann.tensor}}, \code{\link{solve.tensor}} } \examples{ A <- to.tensor(1:20,c(A=2,B=2,C=5)) B <- to.tensor(1:20,c(D=2,B=2,E=5)) mul.tensor(A,"A",A,"B") } \author{K. Gerald van den Boogaart} \keyword{arith}tensorA/man/reordertensor.Rd0000644000176200001440000000221113154210240015610 0ustar liggesusers\name{reorder.tensor} \alias{reorder.tensor} %\alias{reorder.tidx} \title{Permutation of indices and storage sequence of a tensor} \description{ This permutes tensor dimensions like aperm. However the interface is more flexible since not all dimensions have to be given and names can be used instead of numbers. } \usage{ \method{reorder}{tensor}(x,i=NULL,...,by=NULL) %reorder.tidx(x,i,...) } \arguments{ \item{x}{the tensor} \item{i}{numeric or character giving dimensions intended to come first} \item{\dots}{further arguments to other instances of the generic function} \item{by}{the complement of i, if i is not given} } \value{ \code{reorder.tensor} returns a tensor equal to x but stored with a different sequence of dimensions. %\code{reorder.tidx} returns the %permutation of the memory sequence to achieve that as a numeric %vector. } \details{ the remaining dimensions keep their relative sequence and follow at the end of the dimension attribute. } \seealso{ \code{\link{to.tensor}} } \examples{ A <- to.tensor(1:20,c(A=2,B=2,C=5)) A reorder(A,"C") reorder(A,"B") } \author{K.Gerald v.d. Boogaart} \keyword{array}tensorA/man/normtensor.Rd0000644000176200001440000000313313154210241015126 0ustar liggesusers\name{norm.tensor} \alias{norm} \alias{norm.tensor} \alias{opnorm} \alias{opnorm.tensor} \title{Calculate the Euclidean norm or Euclidean operator norm of a tensor or its subtensors} \description{ Calculates the Euclidean norm of a tensor or its subtensors. } \usage{ norm(X,...) \method{norm}{tensor}(X,i=NULL,...,by=NULL) opnorm(X,...) \method{opnorm}{tensor}(X,i=NULL,...,by=NULL) } \arguments{ \item{X}{The tensor} \item{i}{For norm the dimensions of the subtensors to be used. If missing the norm of the whole tensor is computed. For opnorm the dimensions of the image.} \item{\dots}{unused} \item{by}{the list dimension, if i is not specified the norm is calculated for each of these in parallel.} } \value{ \item{norm}{either a single number giving the norm of the tensor or a tensor with the dimensions i removed containing the individual norms in each entry.} \item{opnorm}{a tensor of dimension \code{dim(X)[by]} giving the Euclidean operator norm of the tensor (i.e. its largest singular value) } } \details{ \itemize{ \item{norm}{ The function computes the Euclidean norm, which is the square root of the sum of all squared entries, and not the operator norm. } \item{opnorm}{ The function computes the Euclidean operator norm, which is the largest factor by which the Euclidean norm can change under the linear mapping corresponding to the tensor. 
} } } \seealso{ \code{\link{to.tensor}} } \examples{ C <- to.tensor(1:20,c(A=4,B=5)) norm(C,"A") norm(C,2) norm(C,c("A","B")) opnorm(C,"A") } \author{K. Gerald van den Boogaart} \keyword{algebra}tensorA/man/slicetensor.Rd0000644000176200001440000000333213154210240015252 0ustar liggesusers\name{slice.tensor} \alias{slice.tensor} \alias{[[.tensor} \alias{[.tensor} \alias{[[<-.tensor} \alias{[<-.tensor} \title{Working with the indices of a tensor (accessing, slicing, renaming, ...)} \description{ Indexing of tensors allows beside the ordinary selection of ranges of indices the renaming of indices. The functions are mainly here to keep the the tensor property of the results. } \usage{ slice.tensor(X,i,what,drop=FALSE) ## Methods for class tensor # X[...,drop=TRUE] # X[...,drop=TRUE] <- value # X[[...,drop=TRUE]] # X[[...,drop=TRUE]] <- value } \arguments{ \item{X}{A tensor} \item{i}{an index given as number or character} \item{what}{levels of the index, a number or a character from dimnames} \item{drop}{a boolean, if true, indices with only a single level are removed} \item{\dots}{arguments of the form \code{name=}indices, and for the \code{[[ ]]} functions it also allowed to give names from the corresponding dimnames \code{name=c("a","b")} to select indices by names or \code{name=~newname} to rename dimensions, the first use makes a usual array access in the given dimension, where \code{[[ ]]} only supports a single index, while \code{[ ]} allows vectors. The other type changes the names.} } \value{ a new tensor with dimensions renamed or individual levels selected } \details{ The functions allow to rename dimensions and to take select a part of the tensor. } \seealso{ \code{\link{einstein.tensor}} } \examples{ A <- to.tensor(1:20,c(A=2,B=2,C=5)) A[C=1] A[C=1:3] A[[B=~b]] # renaming dimensions A[[B=~b,A=~aaa]] A[[B=~b,A=~aaa,aaa=1]] A[[A=1,B=~gamma]][C=1:2] A %e% A[[C="D"]] } \author{K. Gerald van den Boogaart} \keyword{arith}tensorA/man/svdtensor.Rd0000644000176200001440000000357613154210240014761 0ustar liggesusers\name{svd.tensor} \alias{svd.tensor} \title{Singular value decomposition of tensors} \description{ A tensor can be seen as a linear mapping of a tensor to a tensor. This function computes the singular value decomposition of this mapping } \usage{ svd.tensor(X,i,j=NULL,...,name="lambda",by=NULL) } \arguments{ \item{X}{The tensor to be decomposed} \item{i}{The image dimensions of the linear mapping} \item{j}{The coimage dimensions of the linear mapping} \item{name}{The name of the eigenspace dimension. This is the dimension created by the decompositions, in which the eigenvectors are \eqn{e_i}} \item{\dots}{further arguments for generic use} \item{by}{the operation is done in parallel for these dimensions} } \value{ a tensor or in case of svd a list u,d,v, of tensors like in \code{\link[base]{svd}}. } \details{ A tensor can be seen as a linear mapping of a tensor to a tensor. Let denote \eqn{R_i} the space of real tensors with dimensions \eqn{i_1...i_d}. 
\itemize{ \item{svd.tensor}{Computes a singular value decomposition \eqn{u_{i_1...i_d\lambda{}}},\eqn{d_\lambda{}}, \eqn{v_{j_1...j_l}\lambda{}} such that u and v correspond to orthogonal mappings from \eqn{R_\lambda{}} to \eqn{R_i} or \eqn{R_j} respectively.} } } \seealso{ \code{\link{to.tensor}}, \code{\link{to.matrix.tensor}}, \code{\link{inv.tensor}}, \code{\link{solve.tensor}} } \examples{ # SVD A <- to.tensor(rnorm(120),c(a=2,b=2,c=5,d=3,e=2)) SVD <- svd.tensor(A,c("a","d"),c("b","c"),by="e") dim(SVD$v) # Property of decomposition einstein.tensor(SVD$v,diag=SVD$d,SVD$u,by="e") # A # Property of orthogonality SVD$v \%e\% SVD$v[[lambda=~"lambda'"]] # 2*delta.tensor(c(lambda=6)) SVD$u \%e\% SVD$u[[lambda=~"lambda'"]] # 2*delta.tensor(c(lambda=6))) SVD$u \%e\% mark(SVD$u,"'",c("a","d")) # 2*delta.tensor(c(a=2,d=3))) } \author{K. Gerald van den Boogaart} \keyword{arith} tensorA/man/leveltensor.Rd0000644000176200001440000000132613154210240015263 0ustar liggesusers\name{level.tensor} \alias{level.tensor} \title{The level (number of indices) of a tensor} \description{ The level of a tensor is the number of dimensions or subscripts used. } \usage{ level.tensor(X,...) } \arguments{ \item{X}{the tensor to be used} \item{...}{not used} } \value{ the number of levels } \details{ The level of the tensor is the length of its dim attribute. Objects without a dim attribute get level 1 if they are of length > 1 and are marked as scalars by 0 level otherwise. } \seealso{ \code{\link{to.tensor}} } \examples{ A <- to.tensor(1:24,c(a=1,b=2,c=3,d=4)) level.tensor(A) level.tensor(matrix(1)) level.tensor(1:10) level.tensor(1) } \author{K. Gerald van den Boogaart} \keyword{arith}tensorA/man/sequencing.Rd0000644000176200001440000000632713154210240015070 0ustar liggesusers\name{sequencing} \alias{$.tensor} \alias{^.tensor} \alias{|.tensor} \alias{renamefirst.tensor} \title{Working with index sequences} \description{ In typical tensor notation the indices are not identified by names but by positions. The operators allow to identify names and positions transparently during calculation. } \usage{ ## Methods for class tensor # x $ y # x ^ y # x | y renamefirst.tensor(x,y) } \arguments{ \item{x}{A tensor } \item{y}{ Typically a character vector specifying a sequence of names for the tensor. The names can be specified in various ways:\cr The following specifications are equal and specify a sequence of the names i,j and k:\cr \code{x$ijk}, \code{x$i.j.k}, \code{i.j.k.}, \code{x"$ijk"}, \code{x^"i.j.k"}, \code{x^c("i","j","k")},\code{x^c("i.j","k")}, \code{x^c("$i.j","k")},\code{x^c("$ij","k")}, \code{x^c("$","ijk")}\cr In general names are separated by dots. All notations with \code{\$} either as operator or as the first character of the first string allow to omit the dots assuming that all names are single character. If any dot is present all dots must be given. The difference of \code{\$} and \code{\^} is that the first accepts a name and the second an character valued expression. \cr Multi letter indices like "alpha","beta","gamma" can only be given in the dot-free version of the notation making the following specifications equal: \code{x$alpha.beta.gamma}, \code{alpha.beta.gamma.}, \code{x^"$alpha.beta.gamma"}, \code{x^"alpha.beta.gamma"}, \code{x^c("alpha","beta","gamma")}, \code{x^c("alpha.beta","gamma")}, \code{x^c("$alpha.beta","k")}, \code{x^c("$","alpha.beta.gammak")}\cr The specification for \code{|} is equal to that for \code{^}. 
} } \value{ A tensor of the same shape as x but with reordered dimensions (for \code{|}) or renamed dimensions (for the others) } \details{ These functions are used to mimic the mathematical notation in tensor analysis. Formulae of the form (with Einstein convention): \deqn{E_{ijk}= A_{ihl}C_{hj}C_{lk}}{E_ijk= A_ihl C_hj C_lk } with defined tensors \eqn{A_{ijk}}{A_ijk} and \eqn{C_{ij}}{C_ij} can be given the simple form \cr \code{ E <- A$ihl \%e\% C$hj \%e\% C$lk |"$ijk"}\cr or alternatively for multi letter names:\cr \code{ E <- A$i.h.l \%e\% C$h.j \%e\% C$l.k |"i.j.k"}\cr or more flexible in computation with arguments I,J,K:\cr \code{ E <- A^c(I,"h.l") \%e\% C^c("h",J) \%e\% C^c("l",K) | c(I,J,K)}\cr The \code{$} or \code{^} binds to the tensors with high precedence and renames the first elements. The \code{|} binds with very low precedence and reorders the tensor according to the assumed index sequence of the result afterwards. } \seealso{ \code{\link{reorder.tensor}}, \code{\link{names<-.tensor}}, \code{\link{[[.tensor}} } \examples{ A <- to.tensor(1:20,c(i=5,j=2,k=2)) C <- to.tensor(1:4,c(i=2,j=2)) E <- A$ihl \%e\% C$hj \%e\% C$lk |"$ijk" E # Same as: E2 <- reorder.tensor(A[[j=~h,k=~l]] \%e\% C[[i=~h]] \%e\% C[[i=~l,j=~k]],c("i","j","k")) E-E2 E <- A$i.h.l \%e\% C$h.j \%e\% C$l.k |"i.j.k" E E-E2 E <- A^"i.h.l" \%e\% C^"h.j" \%e\% C^"l.k" |"i.j.k" E E-E2 } \author{K. Gerald van den Boogaart} \keyword{arith}tensorA/man/margin.Rd0000644000176200001440000000145113154210240014175 0ustar liggesusers\name{margin.tensor} \alias{margin.tensor} \title{Marginalization of tensors} \description{ The function removes dimensions from a tensor by summing all entries which only differ in these dimensions. } \usage{ margin.tensor(X,i=NULL,by=NULL) } \arguments{ \item{X}{the tensor} \item{i}{the dimensions to be removed} \item{by}{instead of i the dimensions to be kept} } \value{ The tensor with all elements only differing only in the dimensions specified added up and only the other dimensions left over. } \details{ This is a tensor multiplication with the \eqn{1_i}{1_i} tensor. } \seealso{ \code{\link{to.tensor}} } \examples{ A <- diag(1:5) A margin.tensor(A,1) A <- to.tensor(1:30,dim=c(i=3,j=5,k=2)) ftable(A) margin.tensor(A,"j") } \author{K. Gerald van den Boogaart} \keyword{arith}tensorA/man/deltatensor.Rd0000644000176200001440000000146413154210240015250 0ustar liggesusers\name{delta.tensor} \alias{delta.tensor} \title{Creates a Kronecker delta tensor} \description{ The delta tensor is the tensor equivalent of the identity. } \usage{ delta.tensor(d,mark="'",dn=NULL,by=NULL) } \arguments{ \item{d}{the row dimensions } \item{mark}{a character to be concatenated to the names of the row dimensions to get the column dimension names} \item{dn}{dimnames for the result} \item{by}{the dimensions which should not be duplicated} } \value{ a tensor with dimension \code{c(d,mark(d,mark))} } \details{ \deqn{E_{i_1\ldots i_n j_1\ldots j_n}=\delta_{i_1j_1}\ldots\delta_{i_nj_n}}{ E_{i_1...i_n j_1...j_n}=\delta_{i_1j_1}...\delta_{i_n j_n}} } \seealso{ \code{\link{to.tensor}} } \examples{ delta.tensor(c(a=2,b=3)) } \author{K. Gerald van den Boogaart} \keyword{arith}tensorA/man/toPos.Rd0000644000176200001440000000245713154210240014033 0ustar liggesusers\name{toPos.tensor} \alias{toPos.tensor} \title{get the position of an index of tensor } \description{ Calculates the position of a tensor index, which specified in any possible way. 
} \usage{ toPos.tensor(M,l=NULL,mnames=names(dim(M)),by=NULL,...,both=FALSE,missing.ok=FALSE) } \arguments{ \item{M}{a tensor} \item{l}{a vector specifying the indices as positions or names} \item{mnames}{The names of the indices of the tensor. This can be specified instead of M. } \item{both}{Matches the index in its covariate and contravariate form. } \item{by}{the list dimension, all operations are done in parallel for all levels of these dimensions. Thus in the case of toPos all other dimensions are returned if they are not specified.} \item{...}{not used} \item{missing.ok}{If TRUE, a missing dimension does not give an error; NA is returned in that place instead.} } \value{ a numeric vector giving the positions of the dimensions selected. } \details{ The function is only here to provide a consistent interface which provides the same functionality for positions and characters. } \examples{ A <- to.tensor(1:30,c(a=2,b=3,c=5)) toPos.tensor(A,c("b","c")) toPos.tensor(A,c(2,1)) # only returns the values toPos.tensor(A,c("^a"),both=TRUE) } \author{K. Gerald van den Boogaart} \keyword{arith}tensorA/man/tensorA.package.Rd0000644000176200001440000002223313154210240015726 0ustar liggesusers\name{tensorA-package} \alias{tensorA-package} \alias{tensor} \alias{Tensor} \alias{tensorA} \docType{package} \title{The tensorA package for tensor arithmetic} \description{ tensorA stands for "tensor arithmetic". A tensor is a mathematical generalization of vector and matrix with many applications in physics, geometry and in the statistics of vector valued data. However the package is also useful in any case where computations on sequences of matrices, vectors or even tensors are involved. } \details{ \tabular{ll}{ Package: \tab tensorA\cr Type: \tab Package\cr Version: \tab 0.1\cr Date: \tab 2006-06-08\cr License: \tab GPL Version 2 or newer\cr } The tensorA package is made to allow programming for tensors in R on the same level of abstraction as we know from matrices. It provides many of the mathematical operations common in tensor arithmetics including the whole tensor calculus of covariate and contravariate indices, naming of indices, sequence of indices, decompositions of tensors, Einstein and Riemann summing conventions and vectorized computations on datasets of tensors just like the well-known vectorization of numbers in R. It provides tools to write tensor formulae very close to their paper form and to handle tensors of arbitrary level with simple programs. \cr The whole documentation of the package is best read in pdf or dvi format since it contains complicated mathematical formulae with multi-indices. \cr Simply speaking a tensor (see \code{\link{to.tensor}}) is just a multidimensional array \code{A[,,]}. The number of indices (i.e. \code{length(dim(A))}) is called the level of the tensor (see \code{\link{level.tensor}}). Mathematically, a tensor is denoted by a core symbol (e.g. A) with multiple indices, e.g. \deqn{A_{ijk}} The indices \eqn{i,j,k} can be seen as names for the dimensions and as integer numbers giving the respective index into the array. However the tensor is an algebraical object with many algebraical operations defined on it, which are also of relevance for programming, e.g. in the parallel treatment of multiple linear equation systems. To understand the package we need to understand tensors including their mathematical origin, the corresponding calculus, notation and basic operations. 
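\cr As a quick illustration of this array view (a minimal sketch; the index names \code{i}, \code{j}, \code{k} are arbitrary choices and not required by the package): \preformatted{
A <- to.tensor(1:24, c(i=2, j=3, k=4))  # a level-3 tensor stored as an array
level.tensor(A)                         # 3, the number of indices
dim(A)                                  # the named dimensions i, j and k
}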
\cr One mathematical interpretation of a tensor, which is most relevant for physics, that of a multi-linear form of \eqn{level(A)} vectors, i.e. a function of \eqn{level(A)} many vectors to the real or complex numbers, which is linear with respect to each of its arguments. E.g. the two vectors "plane face direction" and "force direction" are mapped to the actual force by the stress tensor. \cr Row vectors are a special case of that and likewise column vectors as linear forms for row vectors. Matrices are bilinear forms of a row vector and a column vector. Thus Vectors and Matrices are examples of tensors of level 1 and 2. Another interpretation of a tensor is the that of a linear mapping, quite like a matrix, but from a tensor space (e.g. the space of matrices or vectors seen as tensor) to another tensor space (e.g. again a space of matrices). An example for that is the Hook elasticity tensor mapping the strain tensor (i.e. a matrix describing the local deformation) to the stress tensor (i.e. a matrix describing the local forces). The Hook tensor is a tensor of level 4. Statistically relevant tensors of level 4 are e.g. covariances of matrices mapping two linear forms (i.e. 2 level 2 tensors) on observed matrices to there covariance. The mapping is performed with the tensor product, which is not unlike a matrix product, however more general. Let denote \eqn{A} a matrix and \eqn{v} a vector, we would write \eqn{r=Ab} for the matrix product and \code{r <- A\%*\%b} in R, which is defined as: \deqn{r_i = \sum_{j=1}^{j_{\max}} A_{ij}b_j }{r_i = \sum_{j=1}^{j_{\max}} A_{ij}b_j } We know that we have to use the \(j\)-dimension in the summing, since the matrix multiplication rule says "row times column". Since a tensor can have more than two indices there is no row or column specified and we need to specify the inner product differently. To do this in the Einstein-Notation writing the tensor always with indices \eqn{r_i=A_{ij}b_j} and according to the Einstein summing rule the entries of \(r_i\) are given by an implicit sum over all indices which show up twice in this notation: \deqn{ r_i=\sum_{j=1}^{j_{\max}} A_{ij}b_j }{ r_i=\sum_{j=1}^{j_{\max}} A_{ij}b_j } This notation allows for a multitude of other products: \eqn{ A_{ij}b_i=t(A)b }, \eqn{ A_{ij}b_k=outer(A,b) }{ A_{ij}b_k=outer(A,b) } , \eqn{ A_{ii}b_j=trace(A)b }{ A_{ii}b_j=trace(A)b } with equal simplicity and without any additional functions. More complicated products involving more than tensors of level two can not even be formulated in pure matrix algebra without re-dimensioning of arrays e.g. \eqn{b_ib_jb_k}{b_ib_jb_k}, \eqn{A_{ijk}b_j}{A_{ijk}b_j}. The Einstein summing rule is implemented in \code{\link{einstein.tensor}} and supported by the index sequencing functions \code{\link{$.tensor}} and \code{\link{|.tensor}}. A general multiplication allowing to identify and sum over any two indices is implemented in \code{\link{trace.tensor}}, when the indices are in the same tensor and in \code{\link{mul.tensor}}, when the indices to sum over are in different tensors. \cr Tensors with the same level and dimensions (identified by name and dimension) can also be added like matrices according to the rule that the values with the same combination of index values are added (see \code{\link{add.tensor}}). The implementation takes care of the sequence of the indices and rearranges them accordingly to match dimensions with the same name. E.g. 
the tensor addition \deqn{E_ijk=A_{ijk}+B_{kji}}{E_ijk=A_ijk+B_kji} has the effect, which is expressed by the same formula read in entries, which is also true for the more surprising \deqn{E_ijk=A_{ij}+B_{kj}}{E_ij=A_ijk+B_kj} \cr Like a matrix a tensor can also be seen as a mapping from one tensor space to another: \deqn{A_{i_1\ldots i_d j_1 \ldots j_e}x_{j_1 \ldots j_e}=b_{i_1\ldots i_d}}{A_{i_1...i_d j_1...j_e}x_{j_1...j_e}=b_{i_1...i_d} } In this reading all the standard matrix computations and decompositions get a tensorial interpretation and generalization. The package provides some of these (see \code{\link{svd.tensor}}). \cr Another interpretation of tensors is as a sequence of tensors of lower level. E.g. a data matrix is seen as a sequence of vectors in multivariate dataset. The tensorA library provides means to do computation on these in parallel on these sequences of tensors like we can do parallel computation on sequences of numbers. This is typically done by the \code{by=} argument present in most functions and giving the index enumerating the elements of the sequence.\cr E.g. If we have sequence \eqn{V_{ijd}} of variance matrices \eqn{V_{ij}} of some sequence \eqn{v_{id}} of vectors and we would like to transform the vectors with some Matrix \eqn{M_{i'i}} we would get the sequence of transformed variances by \eqn{V_{ijd} M_{i'i}M_{j'j}}. However if the \eqn{M_{ki}} are different for each of the elements in sequence we would have stored them in a tensor \eqn{M_{kid}} and would have to replace \eqn{M_{kid}} with \eqn{M_{kidd'}=M_{kid}} if \eqn{d=d'} and zero otherwise. We can than get our result by \deqn{V_{ijd}M_{i'id'd}M_{j'jd'd''}} and we would have a by dimension of \code{by="d"}. These operations are not strictly mathematical tensor operation, but generalizations of the vectorization approach of R. This is also closely related to \code{\link{diagmul.tensor}} or \code{\link{diag.tensor}}. \cr To complicate things the Einstein rule is only valid in case of tensors represented with respect to a orthogonal basis. Otherwise tensors get lower and upper indices like \deqn{A_{i\cdot k}^{\cdot j \cdot}} for representation in the covariate and contravariate form of the basis. In this case the Riemann summing rule applies which only sums over pairs of the same index, where one is in the lower and one is in the upper position. The contravariate form is represented with indices prefixed by \code{^}. \cr The state of being covariate or contravariate can be changed by the dragging rule, which allows to switch between both state through the multiplication with the geometry tensors \eqn{g_i^{\;j}}{g_i^j}. This can be done through \code{\link{drag.tensor}}. } \seealso{ \code{\link{to.tensor}}, \code{\link{mul.tensor}} , \code{\link{einstein.tensor}}, \code{\link{add.tensor}}, \code{\link{[[.tensor}}, \code{\link{|.tensor}} } \examples{ A <- to.tensor( 1:20, c(a=2,b=2,c=5) ) A ftable(A) B <- to.tensor( c(0,1,1,0) , c(a=2,"a'"=2)) A \%e\% B drag.tensor( A , B, c("a","b")) A \%e\% one.tensor(c(c=5))/5 # a mean of matrices reorder.tensor(A,c("c","b","a")) A - reorder.tensor(A,c("c","b","a")) # =0 since sequence is irrelevant inv.tensor(A,"a",by="c") } \author{K.Gerald van den Boogaart Maintainer: K. Gerald van den Boogaart Depends: R (>= 2.2.0), stats Description: Provides convenience functions for advanced linear algebra with tensors and computation with data sets of tensors on a higher level abstraction. 
It includes Einstein and Riemann summing conventions, dragging, co- and contravariate indices, parallel computations on sequences of tensors. License: GPL (>= 2) URL: http://www.stat.boogaart.de/tensorA Encoding: latin1 NeedsCompilation: yes Packaged: 2020-11-19 09:50:24 UTC; tolosa53 Repository: CRAN Date/Publication: 2020-11-19 22:40:02 UTC tensorA/tests/0000755000176200001440000000000013154210237013025 5ustar liggesuserstensorA/tests/checker.R0000644000176200001440000005043113154210237014557 0ustar liggesusersrequire(tensorA) if(FALSE) { # Commands for testing debugger() rm(list=objects()) options(error=dump.frames) detach("package:tensorA") dyn.unload("/home/boogaart/R/tensorA/tests/../../tensorA.Rcheck/tensorA/libs/tensorA.so") library(tensorA,lib.loc="../../tensorA.Rcheck") } set.seed(23) summary.tensor <- function(x,...) { n <- level.tensor(x) d <- dim(x) dm <- pmin(d,1) dm2<- pmin(d,2) print(dim(x)) if( !all(sapply(dimnames(x),is.null))) print(dimnames(x)) if( prod(dim(x)) < 10 ) print(x) else if( n == 1 ) print(x[1:min(length(x),10)]) else if( n == 2 ) print(x[1:dm[1],1:dm[2]]) else if( n == 3 ) print(x[1:dm[1],1:dm[2],1:dm2[3],drop=FALSE]) else if( n == 4 ) print(x[1:(dm2[1]),1:dm[1],1,1,drop=FALSE]) else if( n == 5 ) print(x[1:(dm2[1]),1:dm[1],1,1,1,drop=FALSE]) else if( n == 6 ) print(x[1:(dm2[1]),1:dm[1],1,1,1,1,drop=FALSE]) else print(x[1:min(length(x),10)]) } checker <- function(x,y) UseMethod("checker") checker.default <- function(x,y) { cat("Wrong type\n",deparse(match.call()),"\n---------------y=\n") print(y) cat("\n------------x=\n") print(x) stop("Unkown type") } checker.tensor <- function(x,y) { if( !cmp(x,y) ) { cat("Misfit\n",deparse(match.call()),"\n------------------y=\n") print(y) cat("\n-----------------------x=\n") print(x) # print(summary(x)) # print(summary(y)) stop("Missfit"); } else { x } } print.tensor <- function(x){ print.default(unclass(x)) print(dim(x)) } cmp.tensor <- function(x,y) { if(!is.null(names(y))) { mat <- match(names(y),names(x)) if( any(is.na(mat)) ) { print(names(y)) print(names(x)) return(FALSE) } else x <- reorder.tensor(x,mat) } return( length(x)==length(y) && !any(is.na(x)) && sum(abs(c(x)-c(y))^2)<1E-10 && length(dim(x))==length(dim(y)) && !any(is.na(dim(x))) && !any(is.na(dim(y))) && all(dim(x)==dim(y))) } cmp <- function(x,y) UseMethod("cmp") cmp.character <- function(x,y) { return( length(x)==length(y) && all(x==y) ) } cmp.numeric <- function(x,y){ if( length(x) != length(y) ) return(FALSE) if(!is.null(names(y))) { if( is.null(names(y)) || !all( names(x) %in% names(y) ) ) return(FALSE) if( ! 
identical(any( x[match(names(y),names(x))]!=y ),FALSE) ) return(FALSE) return(TRUE) } else { return(all(x==y)) } } cmp.default <- function(x,y) identical(x,y) checker.numeric <- function(x,y) { if( !cmp(x,y) ) { cat("Misfit\n",deparse(match.call()),"\n-----------------y=\n") print(y) cat("\n-------------------------x=\n") print(x) stop("Missfit"); } else print(x) } checker.character <- function(x,y) { if( !cmp(x,y) ) { cat("Misfit\n",deparse(match.call()),"\n-----------------y=\n") print(y) cat("\n----------------------------y=\n") print(x) stop("Missfit"); } else print(x) } checker.list <- function(x,y) { if( length(x) != length(y) || identical(!all(mapply(cmp,x,y)),TRUE) ) { cat("Misfit\n",deparse(match.call()),"\n---------------y=\n") print(y) cat("\n----------------------x=\n") print(x) stop("Missfit"); } else print(x) } nn <- function(...,nn=list(...)) { lapply(1:length(nn),function(i) paste(names(nn)[i],1:nn[[i]],sep="")) } # to.tensor I4 <- diag(4) names(dim(I4))<-c("Q","z") checker(to.tensor(I4,c(a=2,b=2)), to.tensor(c(diag(4)),c(a=2,b=2,z=4))) dimnames(I4) <- nn(Q=4,z=4) checker(to.tensor(I4,c(a=2,b=2),nn(a=2,b=2)), to.tensor(c(diag(4)),c(a=2,b=2,z=4),nn(a=2,b=2,z=4))) # KT5 <- to.tensor(rnorm(30),c(a=3,b=2,c=5),nn(a=3,b=2,c=5)) dim(KT5[[c=2]]) dim(KT5[,,1:2]) summary(KT5) KT5 <- to.tensor(rnorm(30),c(a=3,b=10,c=1),nn(a=3,b=10,c=1)) drop(KT5) checker(as.tensor(KT5),KT5) checker(as.tensor.default(KT5),KT5) checker(to.tensor(KT5),KT5) # I <- to.tensor(diag(3),c(a=3,b=3),what=1:2) checker(to.tensor(I),I) checker(as.tensor(I),I) checker(inv.tensor(I,"a","b"),I) R1 <- matrix(rnorm(9),nrow=3) R1i <- solve(R1) R2 <- to.tensor(R1,c(a=3,b=3),what=1:2) R2i <- to.tensor(R1i,c(b=3,a=3),what=1:2) checker(inv.tensor(R2,"a","b"),R2i) checker(inv.tensor(R2,"a","b",allowSingular=TRUE),R2i) checker(inv.tensor(rep(R2,4,1,"K"),"a","b",by="K"),rep(R2i,4,1,"K")) checker(inv.tensor(rep(R2,4,1,"K"),"a","b",by="K",allowSingular=TRUE),rep(R2i,4,3,"K")) R3 <- to.tensor(rnorm(15),c(a=3,z=5)) checker(mul.tensor(R2i,"b",mul.tensor(R2,"a",R3)),R3) checker(solve.tensor(R2i,R3[[z=1]],"a"),mul.tensor(R2,"a",R3[[z=1]])) checker(solve.tensor(R2i,R3,"a"),mul.tensor(R2,"a",R3)) checker(solve.tensor(R2i,R3[[z=1]],"a",allowSingular=TRUE),mul.tensor(R2,"a",R3[[z=1]])) checker(solve.tensor(R2i,R3,"a",allowSingular=T),mul.tensor(R2,"a",R3)) checker(solve.tensor(rep(R2i,4,1,"K"),R3[[z=1]],"a",by="K"),rep(mul.tensor(R2,"a",R3[[z=1]]),4,1,"K")) checker(solve.tensor(rep(R2i,4,1,"K"),rep(R3[[z=1]],4,1,"K"),"a",by="K"),rep(mul.tensor(R2,"a",R3[[z=1]]),4,1,"K")) checker(solve.tensor(R2i,rep(R3[[z=1]],4,1,"K"),"a",by="K"),rep(mul.tensor(R2,"a",R3[[z=1]]),4,1,"K")) checker(solve.tensor(rep(R2i,4,1,"K"),R3[[z=1]],"a",by="K",allowSingular=TRUE),rep(mul.tensor(R2,"a",R3[[z=1]]),4,1,"K")) checker(solve.tensor(rep(R2i,4,1,"K"),rep(R3[[z=1]],4,1,"K"),"a",by="K",allowSingular=TRUE),rep(mul.tensor(R2,"a",R3[[z=1]]),4,1,"K")) checker(solve.tensor(R2i,rep(R3[[z=1]],4,1,"K"),"a",by="K",allowSingular=TRUE),rep(mul.tensor(R2,"a",R3[[z=1]]),4,1,"K")) checker(solve.tensor(R2i,R3,"a"),mul.tensor(R2,"a",R3)) checker(solve.tensor(R2i,R3[[z=1]],"a",allowSingular=TRUE),mul.tensor(R2,"a",R3[[z=1]])) checker(solve.tensor(R2i,R3,"a",allowSingular=TRUE),mul.tensor(R2,"a",R3)) summary(I) A <- to.tensor(c(diag(3)),c(a=3,b=3)) A <- to.tensor(c(1,1,1,0,1,1,0,0,1),c(a=3,b=3)) checker(mul.tensor(A,"b",A,"a"),to.tensor(c(A%*%A),c(a=3,b=3))) A <- to.tensor(c(1,1,1,1,0,1,1,1,0,0,1,1),c(a=4,b=3)) 
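# Additional illustrative checks (a hedged sketch; Aouter is a new helper name,
# relying only on constructs already exercised above): an outer product over no
# indices has level 4, and since A contains nine unit entries its Euclidean norm
# is 3, so the norm of the outer product of A with a renamed copy of itself is 9.
Aouter <- mul.tensor(A, c(), A[[a=~c, b=~d]], c())
checker(level.tensor(Aouter), 4)
checker(norm.tensor(Aouter), 9)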
checker(mul.tensor(A,"b",A[[a=~c]],"b"),to.tensor(c(A%*%t(A)),c(a=4,c=4))) A <- to.tensor(rnorm(15),c(a=5,b=3)) checker(mul.tensor(A,"b",A[[a=~c]],"b"),to.tensor(c(A%*%t(A)),dim=c(a=5,c=5))) A <- to.tensor(rnorm(5*3),c(a=5,b=3)) B <- to.tensor(rnorm(3*17),c(a=3,b=17)) checker(mul.tensor(A,"b",B,"a"),to.tensor(c(A%*%B),c(a=5,b=17))) A <- to.tensor(c(1,1,1,0,1,1,0,0,1),c(a=3,b=3)) # A <- to.tensor(c(1,1,1,0,1,1,0,0,1),c(a=3,b=3)) solve.tensor(mul.tensor(I,"b",I,"a"),I,"a","a") checker(solve.tensor(mul.tensor(I,"b",I,"a"),I,"a","b"),I) mul.tensor(A,"b",I,"a") checker(solve.tensor(mul.tensor(A,"b",I[[a=~c]],"b"),A,"a","a",allowSingular=TRUE),I[[a=~c]]) B <- A[[b=~z]] checker(solve.tensor(B,mul.tensor(A,"a",B,"a"),"z","z",allowSingular=TRUE),A) checker(solve.tensor(B,mul.tensor(A,"a",B,"a"),"z","z",allowSingular=TRUE), solve.tensor(B,mul.tensor(A,"a",B,"a"),"z","z")) solve.tensor(mul.tensor(A,"b",A,"a"),A,"a","a") checker(solve.tensor(A[[b=~c]],mul.tensor(A,"b",A,"a"),"a","a"),structure(A,dim=c(c=3,b=3))) A <- to.tensor(c(1,1,1,1,0,1,1,1,0,0,1,1),c(a=4,b=3)) A solve.tensor(A[[b=~c]],mul.tensor(A,"a",A[[b=~c]],"a"),"c",allowSingular=TRUE) A <- to.tensor(rnorm(100),c(a=4,b=5,c=5)) An <- to.tensor(rnorm(100),c(a=4,b=5,c=5),nn(a=4,b=5,c=5)) B <- to.tensor(rnorm(100),c(d=4,e=5,f=5)) An <- to.tensor(rnorm(100),c(a=4,b=5,c=5),nn(a=4,b=5,c=5)) mt <- mul.tensor(A,2,B,2) checker(einstein.tensor(A,b="e",B),mt) G <- to.tensor(rnorm(20*20*3),c(a=4,b=5,a1=4,b1=5,I=3)) mul.tensor(A,c("a","b"),G,c("a","b")) checker(einstein.tensor(A,G),einstein.tensor(G,A)) checker(einstein.tensor(A,G),einstein.tensor(G,A)) checker(einstein.tensor(einstein.tensor(A,G),inv.tensor(G,c("a","b"),by="I"),by=c("I")),rep(A,dim(G)["I"],1,"I")) einstein.tensor(A,G) ## chol A <- to.tensor(rnorm(15),c(a=3,b=5)) AAt <- einstein.tensor(A,mark(A,i="a")) ch <- chol.tensor(AAt,"a","a'",name="lambda") #names(ch)[1]<-"lambda" checker(einstein.tensor(ch,mark(ch,i="a")),AAt) A <- to.tensor(rnorm(30),c(a=3,b=5,c=2)) AAt <- einstein.tensor(A,mark(A,i="a"),by="c") ch <- chol.tensor(AAt,"a","a'",name="lambda") checker(einstein.tensor(ch,mark(ch,i="a"),by="c"),AAt) ftable(A) # norm A <- to.tensor(c(1,1,1,1,0,1,1,1,0,0,1,1),c(a=4,b=3)) checker(norm.tensor(A),sqrt(9)) checker(norm.tensor(A,c(1,2)),sqrt(9)) checker(norm.tensor(A,"b"),to.tensor(sqrt(c(1,2,3,3)),c(a=4))) checker(norm.tensor(A,"a"),to.tensor(sqrt(c(4,3,2)),c(b=3))) checker(norm.tensor(A,by="a"),to.tensor(sqrt(c(1,2,3,3)),c(a=4))) checker(norm.tensor(A,by="b"),to.tensor(sqrt(c(4,3,2)),c(b=3))) # opnorm A <- to.tensor(c(1,0,0,0,1,0,0,1),c(a=2,b=2,s=2)) checker(opnorm(A,"a",by="s"),to.tensor(c(1,1),c(s=2))) # margin A <- to.tensor(rnorm(30),c(a=3,b=2,c=5)) checker( margin.tensor(A,c("a","c")),einstein.tensor(A,one.tensor(c(a=3,c=5)))) checker( margin.tensor(A,by=c("a","c")),einstein.tensor(A,one.tensor(c(b=2)))) checker(one.tensor(c(a=3,c=5)),to.tensor(rep(1,15),c(a=3,c=5))) # diagmul A <- to.tensor(rnorm(30),c(a=3,b=2,c=5)) B <- to.tensor(rnorm(6),c(a=3,b=2)) checker(einstein.tensor(A,diag.tensor(B,mark="m")), diagmul.tensor(A,B,i=c("a","b"))[[a=~am,b=~bm]]) # is.tensor if( !identical(c( is.tensor(FALSE), is.tensor(TRUE), is.tensor(matrix(1:3)), is.tensor(as.tensor(matrix(1:3))), is.tensor(A)),c(FALSE,FALSE,FALSE,TRUE,TRUE))) stop("Fehler") # A <- to.tensor(rnorm(30),c(a=3,b=2,c=5)) checker(reorder.tensor(reorder.tensor(reorder.tensor(A,c("c","b","a")),c(1,3,2)),c("a","b","c")),A) mul.tensor(A,c(),B,c(),by=c("a","b")) #### complex A <- to.tensor( c(1+1i,1,0,2-13i) , c(a=2,b=2) ) B <- 
to.tensor( c(1+1i,1,0,2-13i) , c(a=2,b=2) ) A Am <- matrix(c(A),nrow=nrow(A)) Bm <- matrix(c(B),nrow=nrow(B)) checker(mul.tensor(A,"b",B,"a"),to.tensor(c(Am%*%Bm),c(nrow(A),ncol(B)))) R1 <- matrix(rnorm(9)+rnorm(9)*1i,nrow=3) R1i <- solve(R1) R2 <- to.tensor(R1,c(a=3,b=3),what=1:2) R2i <- to.tensor(R1i,c(b=3,a=3),what=1:2) checker(inv.tensor(R2,"a","b"),R2i) checker(inv.tensor(R2,"a","b",allowSingular=TRUE),R2i) checker(inv.tensor(rep(R2,4,1,"K"),"a","b",by="K"),rep(R2i,4,1,"K")) checker(inv.tensor(rep(R2,4,1,"K"),"a","b",by="K",allowSingular=TRUE),rep(R2i,4,3,"K")) R3 <- to.tensor(rnorm(15),c(a=3,z=5)) checker(mul.tensor(R2i,"b",mul.tensor(R2,"a",R3)),R3) checker(solve.tensor(R2i,R3[[z=1]],"a"),mul.tensor(R2,"a",R3[[z=1]])) checker(solve.tensor(R2i,R3,"a"),mul.tensor(R2,"a",R3)) checker(solve.tensor(R2i,R3[[z=1]],"a",allowSingular=TRUE),mul.tensor(R2,"a",R3[[z=1]])) checker(solve.tensor(R2i,R3,"a",allowSingular=T),mul.tensor(R2,"a",R3)) checker(solve.tensor(rep(R2i,4,1,"K"),R3[[z=1]],"a",by="K"),rep(mul.tensor(R2,"a",R3[[z=1]]),4,1,"K")) checker(solve.tensor(rep(R2i,4,1,"K"),rep(R3[[z=1]],4,1,"K"),"a",by="K"),rep(mul.tensor(R2,"a",R3[[z=1]]),4,1,"K")) checker(solve.tensor(R2i,rep(R3[[z=1]],4,1,"K"),"a",by="K"),rep(mul.tensor(R2,"a",R3[[z=1]]),4,1,"K")) checker(solve.tensor(rep(R2i,4,1,"K"),R3[[z=1]],"a",by="K",allowSingular=T),rep(mul.tensor(R2,"a",R3[[z=1]]),4,1,"K")) checker(solve.tensor(rep(R2i,4,1,"K"),rep(R3[[z=1]],4,1,"K"),"a",by="K",allowSingular=T),rep(mul.tensor(R2,"a",R3[[z=1]]),4,1,"K")) checker(solve.tensor(R2i,rep(R3[[z=1]],4,1,"K"),"a",by="K",allowSingular=T),rep(mul.tensor(R2,"a",R3[[z=1]]),4,1,"K")) # trace.tensor A <- to.tensor(rep(1,16),c(a=4,b=4)) checker(trace.tensor(A,"a","b"),4) A <- to.tensor(rep(1,16*3),c(a=4,c=3,b=4)) checker(trace.tensor(A,"a","b"),to.tensor(rep(4,3),c(c=3))) A <- to.tensor(1:(2*2*3*3*5),c(a=2,b=3,c=2,d=3,e=5)) erg <- sapply(1:5,function(e) sum(diag(matrix(A[[e=e]],nrow=6)))) checker(trace.tensor(reorder(A,c(3,4,1,5,2)),c("a","b"),c("c","d")),to.tensor(c(erg),c(e=5))) # delta.tensor checker(delta.tensor(c(a=2,b=3)),to.tensor(c(diag(6)),c(a=2,b=3,"a'"=2,"b'"=3))) # diag.tensor A <- to.tensor(1:6,c(a=2,b=3)) checker(diag.tensor(A),to.tensor(c(diag(c(A))),c(a=2,b=3,"a'"=2,"b'"=3))) A <- to.tensor(1:6,c(a=2,b=3)) checker(diag.tensor(A,by="b"),to.tensor(c(1,0,0,2,3,0,0,4,5,0,0,6),c(a=2,"a'"=2,b=3))) # tripledelta.tensor checker(tripledelta.tensor(c(a=2)),to.tensor(c(1,0,0,0,0,0,0,1),c(a=2,"a'"=2,"a*"=2))) checker(tripledelta.tensor(c(a=2,b=4)), einstein.tensor(tripledelta.tensor(c(a=2)),tripledelta.tensor(c(b=4)) ) ) # one.tensor checker( one.tensor(c(a=3,b=4)), to.tensor(rep(1,12),c(a=3,b=4))) checker(level.tensor(A),length(dim(A))) # svd.tensor A <- to.tensor(rnorm(120),c(a=2,b=2,c=5,d=3,e=2)) SVD <- svd.tensor(A,c("a","d"),c("b","c"),by="e") dim(SVD$v) # Kompositionseigenschaft checker(einstein.tensor(SVD$v,diag=SVD$d,SVD$u,by="e"),A) # Orthogonalitaet: checker( SVD$v %e% SVD$v[[lambda=~"lambda'"]],2*delta.tensor(c(lambda=6))) checker( SVD$u %e% SVD$u[[lambda=~"lambda'"]],2*delta.tensor(c(lambda=6))) checker( SVD$u %e% mark(SVD$u,"'",c("a","d")),2*delta.tensor(c(a=2,d=3))) # power.tensor A <- to.tensor(rnorm(120),c(a=2,b=2,c=5,d=3,e=2)) AAt <- A %e% mark(A,"'",c("a","b")) checker(power.tensor(AAt,c("a","b"),c("a'","b'"),-1),inv.tensor(AAt,c("a","b"))) checker(power.tensor(AAt,c("a","b"),c("a'","b'"),2), mul.tensor(AAt,c("a","b"),AAt,c("a'","b'"))) checker(power.tensor(power.tensor(AAt,c("a","b"),c("a'","b'"),1/pi), c("a","b"),c("a'","b'"),pi),AAt) 
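# Additional illustrative check (a hedged sketch using only the calls verified
# above): reconstructing AAt through power.tensor with exponent 1 should
# reproduce the original tensor up to numerical error.
checker(power.tensor(AAt, c("a","b"), c("a'","b'"), 1), AAt)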
AAt <- einstein.tensor(A , mark(A,"'",c("a","b")),by="e") checker(power.tensor(AAt,c("a","b"),c("a'","b'"),-1,by="e"), inv.tensor(AAt,c("a","b"),by="e")) checker(power.tensor(AAt,c("a","b"),c("a'","b'"),2,by="e"), mul.tensor(AAt,c("a","b"),AAt,c("a'","b'"),by="e")) checker(power.tensor(power.tensor(AAt,c("a","b"),c("a'","b'"),1/pi,by="e"), c("a","b"),c("a'","b'"),pi,by="e"),AAt) # to.matrix.tensor # A <- reorder.tensor(to.tensor(1:30,c(a=2,b=3,c=5)),c("c","a","b")) checker(to.matrix.tensor(A,"a",c("b","c")),matrix(1:30,nrow=2)) checker(to.matrix.tensor(A,c("a","b"),c("c")),matrix(1:30,nrow=6)) checker(to.matrix.tensor(A,c("a","b"),by=c("c")),structure(1:30,dim=c(6,1,5))) checker(to.matrix.tensor(A,c("a"),by=c("c")),structure(1:30,dim=c(2,3,5))) # untensor A <- reorder.tensor(to.tensor(1:30,c(a=2,b=3,c=5)),c("c","a","b")) checker(untensor(A,c("a","b"),pos=1),to.tensor(1:30,c(I1=6,c=5))) checker(untensor(A,c("a","b"),"new",pos=2),reorder(to.tensor(1:30,c(new=6,c=5)),2:1)) checker(untensor(A,list(u=c("a","b"),v=c(c="c"))),to.tensor(1:30,c(u=6,v=5))) # as.tensor checker(as.tensor(diag(5)),to.tensor(c(diag(5)),c(I1=5,I2=5))) # Slice tensor A <- reorder.tensor(to.tensor(1:30,c(a=2,b=3,c=5)),c("c","a","b")) checker(slice.tensor(A,"c",1:2),to.tensor(1:12,c(a=2,b=3,c=2))) checker(slice.tensor(A,"c",1,drop=TRUE),to.tensor(1:6,c(a=2,b=3))) checker(slice.tensor(A,"c",1,drop=FALSE),to.tensor(1:6,c(a=2,b=3,c=1))) # Indexing with [[]] A <- reorder.tensor(to.tensor(1:30,c(a=2,b=3,c=5)),c("c","a","b")) checker(A[[b=2]],slice.tensor(A,"b",2,drop=TRUE)) checker(A[[b=2:3]],slice.tensor(A,"b",2:3)) checker(A[[b=2:3,c=3:4]],slice.tensor(slice.tensor(A,"b",2:3),"c",3:4)) checker(A[[b=~q]],to.tensor(1:30,c(a=2,q=3,c=5))) #undrop.tensor checker(undrop.tensor(slice.tensor(A,"c",2,drop=TRUE),"c"), slice.tensor(A,"c",2,drop=FALSE)) # bind.tensor A <- to.tensor(1:6,c(a=2,b=3)) checker( bind.tensor(A,"a",A), to.tensor(c(1,2,1,2,3,4,3,4,5,6,5,6),c(a=4,b=3))) checker( bind.tensor(A,"b",A), to.tensor(c(1:6,1:6),c(a=2,b=6))) # einstein.tensor A <- to.tensor(1:6,c(a=2,b=3)) checker( einstein.tensor(A,A), mul.tensor(A,c("a","b"),A)) checker( einstein.tensor(A,A,by="b"), mul.tensor(A,c("a"),A,by="b")) checker( einstein.tensor(A,diag=A,by="b"), diagmul.tensor(A,c("a"),A,by="b")) # adding A <- to.tensor(1:30,c(a=2,b=3,c=5)) checker(A + reorder(A,c("b","a","c")),2*A) checker(A - reorder(A,c("b","a","c")),to.tensor(rep(0,30),c(a=2,b=3,c=5))) B <- to.tensor(rnorm(6),c(b=3,a=2)) checker( A + B , rep.tensor(B,5,name="c") + A ) checker( B + A , rep.tensor(B,5,name="c") + A ) checker( A + (B - A), rep.tensor(B,5,name="c") ) checker( B + (A - B), A ) C <- to.tensor(1:42,c(a=2,b=3,d=7)) checker( A+C , rep.tensor(A,7,name="d") + rep.tensor(C,5,name="c")) # Einstein checker( C %e% A , einstein.tensor(C,A)) # Riemann + drag checker( C %r% mark(A), mul.tensor(C,c(),mark(A),c())) A <- to.tensor(1:16,c(a=2,b=2,c=2,d=2)) gij <- to.tensor(c(1,0.5,0.5,1),c(i=2,j=2)) ginv <- c(solve(matrix(c(1,0.5,0.5,1),2))) gij1 <- to.tensor(ginv,c("^a"=2,a=2)) gij2 <- to.tensor(ginv ,c("^b"=2,b=2)) is.covariate(gij) checker( drag.tensor(gij,gij,c("i")) , to.tensor(c(1,0,0,1),c("^i"=2,"j"=2))) checker( drag.tensor(gij,gij,c("j")) , to.tensor(c(1,0,0,1),c("i"=2,"^j"=2))) checker( drag.tensor(gij,gij,c("i","j")), to.tensor(ginv,c("^i"=2,"^j"=2))) checker( drag.tensor(gij,gij,c("i","j"))[["^i"=~a,"^j"=~b]], riemann.tensor(gij[[i=~a,j=~b]],gij1,gij2)) checker( drag.tensor(A,gij,c("a","b")), einstein.tensor(A,gij1,gij2) ) 
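# Additional illustrative check (a hedged sketch; ginvT is a new helper name):
# the contravariate metric is the matrix inverse of the covariate metric gij,
# so a Riemann contraction of the two over i yields the identity.
ginvT <- to.tensor(ginv, c("^i"=2, "^k"=2))
checker(ginvT %r% gij, to.tensor(c(1,0,0,1), c("^k"=2, j=2)))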
########################################################################### ### Names ########################################################################## # I <- to.tensor(diag(3),c(a=3,b=3),nn(a=3,b=3),what=1:2) checker(to.tensor(I),I) checker(as.tensor(I),I) checker(inv.tensor(I,"a","b"),I) R1 <- matrix(rnorm(9),nrow=3) R1i <- solve(R1) R2 <- to.tensor(R1,c(a=3,b=3),nn(a=3,b=3),what=1:2) R2i <- to.tensor(R1i,c(b=3,a=3),nn(b=3,a=3),what=1:2) checker(inv.tensor(R2,"a","b"),R2i) checker(inv.tensor(R2,"a","b",allowSingular=TRUE),R2i) R3 <- to.tensor(rnorm(15),c(a=3,z=5),nn(a=3,z=5)) checker(mul.tensor(R2i,"b",mul.tensor(R2,"a",R3)),R3) checker(solve.tensor(R2i,R3[[z=1]],"a"),mul.tensor(R2,"a",R3[[z=1]])) checker(solve.tensor(R2i,R3,"a"),mul.tensor(R2,"a",R3)) checker(solve.tensor(R2i,R3[[z=1]],"a",allowSingular=TRUE),mul.tensor(R2,"a",R3[[z=1]])) checker(solve.tensor(R2i,R3,"a",allowSingular=TRUE),mul.tensor(R2,"a",R3)) checker(solve.tensor(rep(R2i,4,1,"K"),R3[[z=1]],"a",by="K"),rep(mul.tensor(R2,"a",R3[[z=1]]),4,1,"K")) checker(solve.tensor(rep(R2i,4,1,"K"),rep(R3[[z=1]],4,1,"K"),"a",by="K"),rep(mul.tensor(R2,"a",R3[[z=1]]),4,1,"K")) checker(solve.tensor(R2i,rep(R3[[z=1]],4,1,"K"),"a",by="K"),rep(mul.tensor(R2,"a",R3[[z=1]]),4,1,"K")) checker(solve.tensor(rep(R2i,4,1,"K"),R3[[z=1]],"a",by="K",allowSingular=TRUE),rep(mul.tensor(R2,"a",R3[[z=1]]),4,1,"K")) checker(solve.tensor(rep(R2i,4,1,"K"),rep(R3[[z=1]],4,1,"K"),"a",by="K",allowSingular=TRUE),rep(mul.tensor(R2,"a",R3[[z=1]]),4,1,"K")) checker(solve.tensor(R2i,rep(R3[[z=1]],4,1,"K"),"a",by="K",allowSingular=TRUE),rep(mul.tensor(R2,"a",R3[[z=1]]),4,1,"K")) checker(solve.tensor(R2i,R3,"a"),mul.tensor(R2,"a",R3)) checker(solve.tensor(R2i,R3[[z=1]],"a",allowSingular=TRUE),mul.tensor(R2,"a",R3[[z=1]])) checker(solve.tensor(R2i,R3,"a",allowSingular=TRUE),mul.tensor(R2,"a",R3)) summary(I) A <- to.tensor(c(diag(3)),c(a=3,b=3),nn(a=3,b=3)) A <- to.tensor(c(1,1,1,0,1,1,0,0,1),c(a=3,b=3),nn(a=3,b=3)) checker(mul.tensor(A,"b",A,"a"),to.tensor(c(A%*%A),c(a=3,b=3),nn(a=3,b=3))) A <- to.tensor(c(1,1,1,1,0,1,1,1,0,0,1,1),c(a=4,b=3),nn(a=4,b=3)) checker(mul.tensor(A,"b",A[[a=~c]],"b"), to.tensor(c(A%*%t(A)),dim=c(a=4,c=4),nn(a=4,a=4))) A <- to.tensor(rnorm(15),c(a=5,b=3),nn(a=5,b=3)) checker(mul.tensor(A,"b",A[[a=~c]],"b"), to.tensor(c(A%*%t(A)),dim=c(a=5,c=5),nn(a=5,a=5))) # A <- to.tensor(c(1,1,1,0,1,1,0,0,1),c(a=3,b=3)) solve.tensor(mul.tensor(I,"b",I,"a"),I,"a","a") checker(solve.tensor(mul.tensor(I,"b",I,"a"),I,"a","b"),I) mul.tensor(A,"b",I,"a") # + - * / A <- to.tensor(1:4,c(i=4)) B <- to.tensor(1:4,c(j=4)) checker((A+B-A-B),0*(A %e% B)) checker((A-B-A+B),0*(A %e% B)) checker((A+B-B-A),0*(A %e% B)) checker((A-B+B-A),0*(A %e% B)) checker((A-A+B-B),0*(A %e% B)) checker((A*B/A/B),one.tensor(c(dim(A),dim(B)))) checker((A/B/A*B),one.tensor(c(dim(A),dim(B)))) checker((A*B/B/A),one.tensor(c(dim(A),dim(B)))) checker((A/B*B/A),one.tensor(c(dim(A),dim(B)))) checker((A/A*B/B),one.tensor(c(dim(A),dim(B)))) # $,|,^ A <- to.tensor(1:6,c(a=1,b=2,c=3)) checker( names(A$ijk) , c("i","j","k") ) checker( names(A$i.j.k) , c("i","j","k") ) checker( A$ijk^c("a","b","c") , A ) checker( A$ijk^"a.b.c" , A ) checker( A$ijk^"$abc" , A ) checker( names(A|"$bca"), c("b","c","a")) checker( names(A|"b.c.a"), c("b","c","a")) checker( names(A|"$b.c.a"), c("b","c","a")) checker( names(A|c("$b","c.a")), c("b","c","a")) checker( names(A|c("$b","c","a")), c("b","c","a")) checker( names(A|c("$","b","c","a")), c("b","c","a")) checker( names(A|c("b.c.a")), c("b","c","a")) # 
slice.tensor<- #A <- to.tensor(1:24,c(a=2,b=3,c=4)) #B <- to.tensor(25:48,c(a=2,b=3,c=4)) #slice.tensor(A,"c",1:2)<- slice.tensor(B,"c",1:2) #checker(slice.tensor(A,"c",1:2),slice.tensor(B,"c",1:2)) #slice.tensor(A,"b",2)<- slice.tensor(B,"b",2) #checker(slice.tensor(A,"b",2),slice.tensor(B,"b",2)) tensorA/tests/examples.R0000644000176200001440000002270213154210237014771 0ustar liggesusersrequire(tensorA) if(FALSE) { # Commands for testing debugger() options(error=dump.frames) detach("package:tensorA") library(tensorA,lib.loc="../../tensorA.Rcheck") } #Aidx(3,4) # c(1,2,3,1,2,3,1,2,3,1,2,3) #Bidx(3,4) # c(1,1,1,2,2,2,3,3,3,4,4,4) # gsi.eps #gsi.eps # 1E-10 # gs.setarg #tmp <- function(a=2,b=3) {a*b} #if( gsi.setarg(tmp,b=5)()!=10 ) # stop("Fehler gs.setarg") # to.tensor to.tensor(c(1,2,3)) dim(to.tensor(c(1,2,3))) set.seed(23) A <- to.tensor(1:20,c(U=2,V=2,W=5)) A dim(A) names(A) dimnames(A) ftable(to.tensor(A)) ftable(to.tensor(c(A),dim(A))) ftable(to.tensor(c(A),dim(A),dimnames(A))) ftable(to.tensor(A,dim(A),what=1:3)) ftable(to.tensor(A,dim(A)[1:2],dimnames(A)[1:2],1:2)) ftable(to.tensor(A,dim(A)[1],dimnames(A)[1],1,addIndex=TRUE)) Anamed <- A #dimnames(Anamed)[["U"]]<- gsi.stdnames(2,"u") #dimnames(Anamed)[["V"]]<- gsi.stdnames(2,"v") #dimnames(Anamed)[["U"]]<- gsi.stdnames(dim(A)["w"],"w") ftable(Anamed) ftable(to.tensor(Anamed)) ftable(to.tensor(c(Anamed),dim(Anamed))) ftable(to.tensor(c(Anamed),dim(Anamed),dimnames(Anamed))) ftable(to.tensor(Anamed,dim(Anamed),what=1:3)) ftable(to.tensor(Anamed,dim(Anamed)[1:2],dimnames(Anamed)[1:2],1:2)) ftable(to.tensor(Anamed,dim(Anamed)[1],dimnames(Anamed)[1],1,addIndex=TRUE)) ftable(to.tensor(Anamed,dim(Anamed)[1],dimnames(Anamed)[1],1,addIndex="I")) B <- to.tensor(1:30,list(U=c("a","b","c"),V=c("B1","B2"),W=1:5)) B dim(B) names(B) dimnames(B) B2 <- to.tensor(B,c(x=1,y=3,z=1)) ftable(B2) C <- to.tensor(1:20,c(A=4,B=5)) C C <- to.tensor(C,c(A1=2,A2=2)) C D <- C C <- to.tensor(C,c(A1=1,A2=4),what=c("A2","A1")) C C <- to.tensor(C,c(A1=2),what=c("A2","A1"),addIndex="Q") C Cnamed <- to.tensor(1:20,c(A=4,B=5)) dimnames(Cnamed)[["B"]]<-LETTERS[1:5] dimnames(Cnamed)[["A"]]<-letters[1:4] Cnamed Cnamed <- to.tensor(Cnamed,c(A1=2,A2=2)) dimnames(Cnamed) Dnamed <- Cnamed Cnamed <- to.tensor(Cnamed,c(A1=1,A2=4),what=c("A2","A1")) ftable(Cnamed) Cnamed <- to.tensor(Cnamed,c(A1=2),what=c("A2","A1"),addIndex="Q") ftable(Cnamed) # names names( C ) names( C ) <- c("A1","A2","A3") C names(C) names( Cnamed ) names( Cnamed ) <- c("A1","A2","A3") ftable(Cnamed) names(Cnamed) # norm norm.tensor(C) - sqrt(sum((1:20)^2)) norm.tensor(Cnamed) - sqrt(sum((1:20)^2)) # margin.tensor margin.tensor(C,"A1") margin.tensor(Cnamed,"A1") # diagmul.tensor norm.tensor( diagmul.tensor(C,D=C)-C^2 ) # 0 norm.tensor( diagmul.tensor(Cnamed,D=Cnamed)-Cnamed^2 ) # 0 Cr <- rep.tensor(C,10,1,name="K") #norm(diagmul.tensor(Cr,names(C),C,names(C),by="K") - diagmul.tensor(Cr,names(C),D=Cr,names(C),by="K")) # 0 # pos.tensor pos.tensor(dim(C)) # reorder.tensor reorder.tensor(C,c(2)) reorder.tensor(C,c("A3","A2")) ftable(reorder.tensor(Cnamed,c(2))) ftable(reorder.tensor(Cnamed,c("A3","A2"))) # mul.tensor AA <- mul.tensor(A,1,mark(A),1) AA dim(AA) AA <- mul.tensor(Anamed,1,mark(Anamed),1) ftable(AA) dim(AA) A <- to.tensor(as.complex(1:20),c(U=2,V=2,W=5)) AA <- mul.tensor(A,1,mark(A),1) # # rep.tensor rep(A,3,name="Q",pos=4) rep(Anamed,3,name="Q",pos=4) ftable(rep(Anamed,3,name="Q",pos=4)) # trace.tensor trace.tensor(D,"A1","A2") dim(trace.tensor(D,"A1","A2")) trace.tensor(Dnamed,"A1","A2") 
dim(trace.tensor(Dnamed,"A1","A2")) #delta.tensor delta.tensor(c(a=2)) delta.tensor(c(a=2,b=3)) dim(delta.tensor(c(a=2,b=4),".")) ftable(delta.tensor(c(a=2,b=2))) ftable(delta.tensor(c(a=2,b=3))) # tripledelta.tensor tripledelta.tensor(c(a=2)) ftable(tripledelta.tensor(c(a=2))) dim(tripledelta.tensor(c(a=2),"1","2")) # mark.tensor A mark(A,"*") names(mark(A,"*")) mark(A,"*","U") names(mark(A,"*","U")) names(mark(A,"*",2:3)) Anamed mark(Anamed,"*") names(mark(Anamed,"*")) mark(Anamed,"*","U") names(mark(Anamed,"*","U")) names(mark(Anamed,"*",2:3)) # mark.numeric mark(dim(A),"*") mark(dim(A),"*","U") mark(dim(A),"*",2:3) # mark.character mark(names(A),"*") mark(names(A),"*","U") mark(names(A),"*",2:3) # inv.tensor E <- to.tensor(rnorm(16),c(a=2,b=2,A=2,B=2)) Ei <- inv.tensor(E,c("A","B")) E dim(Ei) names(Ei) dim(mul.tensor(E,c("A","B"),mark(Ei,c("a","b")))) dim(delta.tensor(c(a=2,b=2))) norm.tensor(mul.tensor(E,c("A","B"),mark(Ei,c("a","b")))-delta.tensor(c(a=2,b=2))) AA <- to.tensor(rnorm(20),c(a=2,b=2,C=5)) inv.tensor(AA,"a",by="C") E <- to.tensor(rnorm(16),c(a=2,b=2,A=2,B=2),list(c("a","b"),NULL,c("A1","A2"),c("B1","B2"))) Ei <- inv.tensor(E,c("A","B")) E dim(Ei) names(Ei) norm.tensor(mul.tensor(E,c("A","B"),mark(Ei,c("a","b")))-delta.tensor(c(a=2,b=2))) ftable(round(mul.tensor(E,c("A","B"),mark(Ei,c("a","b")))),3) # solve.tensor X1 <- to.tensor( c( 0 , 10 , 20 ,30 , 2,2,2,2, 45,21,34,5, 67,0,0,0 ), c(a=2,b=2,"a'"=2,"b'"=2) ) b1 <- to.tensor(c( 0,10,20,30),c(a=2,b=2)) solve.tensor(X1,b1,c("a","b")) norm.tensor(solve.tensor(X1,b1,c("a","b")) - to.tensor(c( 1,0,0,0 ),c(a=2,b=2))) #0 dimnames(X1)[["a"]]<- 1:2 dimnames(X1)[["b"]]<- 1:2 dimnames(X1)[["a'"]]<- 1:2 dimnames(b1)[["a"]] <-c("M","K") solve.tensor(X1,b1,c("a","b")) norm.tensor(solve.tensor(X1,b1,c("a","b")) - to.tensor(c( 1,0,0,0 ),c(a=2,b=2))) #0 # chol.tensor Xs <- einstein.tensor(X1,a="a*",b="b*",X1) dim(Xs) norm.tensor( Xs - mul.tensor(X1,c("a'","b'"),mark(X1,"*",c("a","b")),c("a'","b'"))) #0 RXs <- chol.tensor(Xs,c("a","b"),c("a*","b*")) norm.tensor(mul.tensor(RXs,1,mark(RXs),1) - Xs ) # 0 dimnames(X1)[["a"]]<-c("q1","q2") dimnames(X1)[["a'"]]<-c("p1","p2") Xs <- einstein.tensor(X1,a="a*",b="b*",X1) dimnames(Xs) dimnames(Xs)["a*"]<-list(NULL) dim(Xs) norm.tensor( Xs - mul.tensor(X1,c("a'","b'"),mark(X1,"*",c("a","b")),c("a'","b'"))) #0 RXs <- chol.tensor(Xs,c("a","b"),c("a*","b*")) norm.tensor(mul.tensor(RXs,1,mark(RXs),1) - Xs ) # 0 # level.tensor dim(X1) level.tensor(X1) # 4 # svd.tensor SXs <- svd.tensor(X1,c("a","b")) SXs DXs <- to.tensor(c(diag(SXs$d)),c(lambda=length(SXs$d),"lambda'"=length(SXs$d))) SXs2<-mul.tensor(mul.tensor(SXs$u,"lambda",DXs,"lambda'"),"lambda",SXs$v) # SXs2 norm.tensor(SXs2-X1) #0 # power.tensor Xs rXs <- power.tensor(Xs,c(1,2),c(3,4),p=0.5) Xs2 <- mul.tensor(rXs,c("a","b"),rXs,c("a*","b*")) Xs2 norm.tensor((Xs-Xs2)) # 0 # to.matrix.tensor to.matrix.tensor(X1,j=c("a'","b'")) to.matrix.tensor(X1,i=c("a","b") ) # gsi.stdnames #gsi.stdnames(7,avoid=gsi.stdnames(5)) #gsi.stdnames(3,"B") #gsi.stdnames(3,"B",avoid=gsi.stdnames(5)) # gsi.namedlist #gsi.namedlist(c("Holla","Hoppla"),1,3) # gsi.lefts/gsi.rights #gsi.lefts(X1) #gsi.rights(X1) # untensor tmp <- untensor(X1,c("a","b"),"A",1) X1 <- to.tensor( c( 0 , 10 , 20 ,30 , 2,2,2,2, 45,21,34,5, 67,0,0,0 ), c(a=2,b=2,"a'"=2,"b'"=2) ) tmp <- untensor(X1,c("a","b"),"A",1) tmp untensor(tmp,c("a'","b'"),"B",2) # gsi.untensornames untensor(A,c(1,2)) untensor(Anamed,c(1,2)) # as.tensor as.tensor(X1) as.tensor(c(1,2,3)) dim(as.tensor(c(1,2,3))) # renorm.matrix / 
renorm.tensor #renorm.rows(X1[,,1,1]) #renorm.tensor(X1[,,1,1]) #norm.tensor(renorm.tensor(X1,c(1,2)),c(1,2)) # 1 1 1 1 # slice.tensor slice.tensor(B,"U","a") slice.tensor(B,"U","a",drop=TRUE) slice.tensor(B,"U",c("a","c")) # [[]].tensor B[[U="a"]] B[[U="a",drop=FALSE]] old <- B[[U=c("a","c")]] B[1:2,,] B[[1]] B[[U=c("a","c")]]<-7 B B[[U=c("a","c")]]<- old # undrop.tensor undrop.tensor(A,name="Q",2) # combineCF.tensor # combineCF.tensor(Xs,c("a","b"),Xs,c("a","b")) # bind.tensor bind.tensor(A,1,einstein.tensor(A,V="q","U"="V",q="U"),2) bind.tensor(A,1,A,1) bind.tensor(Anamed,1,einstein.tensor(Anamed,V="q","U"="V",q="U"),2) bind.tensor(Anamed,1,Anamed,1) # toPos.tensor #toPos.tensor(A,c("A","U","I")) # error toPos.tensor(A,rev(names(A))) # einstein.tensor einstein.tensor(A,U="U'",B) einstein.tensor(A,U="U'",mark(B,"k")) einstein.tensor(A,U="U'",mark(B,"k"),V="Vk",W="Wk") einstein.tensor(A,U="U'",mark(B,"k"),V="Vk",W="Wk",1/10) einstein.tensor(A,U="U'",mark(B,"k"),V="Vk",W="Wk",diag=to.tensor(c(1,1/10,1/100),c(Uk=3))) ftable(einstein.tensor(A,U="U'",B)) ftable(einstein.tensor(A,U="U'",mark(B,"k"))) ftable(einstein.tensor(A,U="U'",mark(B,"k"),V="Vk",W="Wk")) ftable(einstein.tensor(A,U="U'",mark(B,"k"),V="Vk",W="Wk",1/10)) ftable(einstein.tensor(A,U="U'",mark(B,"k"),V="Vk",W="Wk",diag=to.tensor(c(1,1/10,1/100),c(Uk=3)))) # %e% [[]] dim(A[[U=~M]]) A[[U=~M]] %e% B A[[U=~M,V=~"L"]] %e% B # firstnames/secondnames #firstnames(secondnames(names(A),"2"),"2") contraname(names(A)) dim(A) dim(B) add.tensor(A,A)/2-A norm.tensor(add.tensor(A,A)/2-A) tmp <- add.tensor( A,A[[U=~K]] ,op="-") dim(tmp) ftable(tmp) A %e% A norm.tensor(reorder(A,c(2,3,1)) - A) # Dragging g <- to.tensor(c(1,2,0,1),c(i=2,j=2)) A <- to.tensor(rnorm(8),c(a=2,b=2,c=2)) A2 <- drag.tensor(A,g,c("b","c")) A2 names(A2) as.covariate(names(A2)) as.contravariate(names(A2)) is.covariate(A2) is.contravariate(A2) riemann.tensor(A2,g) # ####################### d1 <- c(a=2,b=2) d2 <- c("a'"=2,"b'"=2) m <- to.tensor(1:4,d1) V <- delta.tensor(d1)+one.tensor(c(d1,d2)) V X <- (power.tensor(V,c("a","b"),c("a'","b'"),p=1/2) %e% to.tensor(rnorm(1000*2*2),c(i=1000,d2))) + m # The mean mean.tensor(X,along="i") # Full tensorial covariance: var.tensor(X,along="i") # Variance of the slices X[[b=1]] and X[[b=2]] : var.tensor(X,along="i",by="b") # Covariance of the slices X[[b=1]] and X[[b=2]] : var.tensor(X[[b=1]],X[[a=~"a'",b=2]],along="i") ## ######################### Var <- function(x,along) { one <- one.tensor(dim(x)[along]) M <- one/(one%e%one) z <- x- M %e% x einstein.tensor( z,mark(z),by=names(M)) %e% M } A <- to.tensor(rnorm(4000),c(i=2,j=2,s=1000)) dim(A)["s"] Var(A,"s") delta.tensor(c(i=2,j=2)) Var(A,"s")-delta.tensor(c(i=2,j=2)) tensorA/src/0000755000176200001440000000000013154210237012452 5ustar liggesuserstensorA/src/tensora.c0000644000176200001440000000505213753463622014310 0ustar liggesusers#include #include #define xx(i,j,l) (X[(i)+dimx[0]*((j)+dimx[1]*(l))]) #define yy(i,j,l) (Y[(i)+dimy[0]*((j)+dimy[1]*(l))]) #define ee(i,j,l) (E[(i)+dime[0]*((j)+dime[1]*(l))]) static R_NativePrimitiveArgType tensoramulhelper_t[] = { /* int *dimx, int *dimy, int *dime, double *X, double *Y, double *E*/ INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP }; /** The function performs many matrix multiplications in parallel. 
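A sketch of the intended semantics: for each parallel slice l the result
E[,,l] should be the matrix product of X[,,l] (dimx[0] x dimx[1]) with
Y[,,l] (dimy[0] x dimy[1]); the dimension check below enforces
dimx[1]==dimy[0], dime[0]==dimx[0], dime[1]==dimy[1] and equal slice counts.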
*/ extern void tensoramulhelper(int *dimx,int *dimy,int *dime, double *X,double *Y,double *E) { int i,j,k,l; double tmp; if(dimx[1]!=dimy[0]||dimx[2]!=dimy[2]||dimx[0]!=dime[0]|| dimy[1]!=dime[1]||dimx[2]!=dime[2]) error("multensorhelper: Dimension missmatch"); for(l=0;l Copyright (C) 19yy This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) 19yy name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. tensorA/R/0000755000176200001440000000000013753464457012110 5ustar liggesuserstensorA/R/TensorAx.R0000755000176200001440000013340313753464526014002 0ustar liggesusers #.First.lib <- function(lib,pkg) { # library.dynam("tensorA",pkg,lib) #} #.Last.lib <- function(libpath) { # library.dynam.unload("tensorA",libpath) #} gsi.cat <- function(...,l=list(...)) { n <- names(l) if( length(l)>1 || !is.null(names(l)) ) { cat("{") for( i in 1:length(l) ) { if( !is.null(n) ) cat(n[i],"=",sep="") gsi.cat(l=l[[i]]) cat(" ") } cat("}") } else if( length(l)>0 ) try(cat(l[[1]])) } if( FALSE ) { gsi.debug <- function(...) {try(gsi.cat(...))} gsi.debugn<- function(...) {try(gsi.cat(...))} gsi.debugf<- function(...) {print(match.call("",call=sys.call(sys.parent())))} gsi.debugr<- function(X) { cat(as.character(as.list(sys.call(sys.parent()))[[1]]),"\n"); gsi.cat(dim(X),"\n"); X } } else { gsi.debug <- function(...) {list(...)} gsi.debugn<- function(...) {} gsi.debugf <- function(...) 
{} gsi.debugr <- function(X) {X} } # Some nice conveniance functions for matrices not worth a package #Aidx<- function(n,m) {rep(1:n,m)} #Bidx<- function(n,m) {rep(1:m,each=n)} # Modify the formal arguments of a function gsi.setarg <-function(fun,...) { env <- environment(fun) P <- formals(fun) L <- list(...) P[names(L)]<-L formals(fun)<-P environment(fun)<-env fun } # # The index described by what is restructured to a tensor of the structure # given by dims. # to.tensor <- function(X,...) UseMethod("to.tensor",X) to.tensor.default <- function(X,dims=NULL,ndimnames=NULL,what=1,addIndex=FALSE,...){ # Make what the first unstructured index gsi.debugn("to.tensor$dims=",dims) if( !missing(what) ) X <- untensor(X,what) # Process prior structure if( is.null(d<- dim(X)) ) dim(X) <- d <- c("I"=length(X)) if( is.null( odimnames <- dimnames(X) ) ) odimnames <- rep(list(NULL),length(d)) else attr(X,"dimnames") <- NULL if( missing(dims) ) { if( is.null(ndimnames) ) { if( is.null(names(dim(X))) ) if( is.null(names(dimnames(X))) ) names(dim(X))<-gsi.stdnames(length(dim(X)),"I") else names(dim(X))<-names(dimnames(X)) names(odimnames) <- names(dim(X)) attr(X,"dimnames") <- odimnames if( length(dim(X))==2 ) class(X) <- c("tensor","matrix") else class(X) <- "tensor" return(gsi.debugr(X)) } else { dims <- sapply(ndimnames,length) names(dims) <- names(ndimnames) } } else if( is.list(dims) ) { ndimnames <- dims dims <- sapply(ndimnames,length) names(dims) <- names(ndimnames) } # Process new structure # Process dimension names if( !is.null(dims) && length(dims) == 0 && length(X) <2 ) return( c(X) ) newnames <- names(dims) if( is.null(newnames) ) { if( ! is.null(ndimnames) ) newnames <- names(ndimnames) if( is.null(newnames)) newnames <- gsi.stdnames(length(dims),"I",avoid=names(dim(X)[-1])) names(dims) <- newnames } pl <- prod(dims) # Repeat elements in case needed if( d[1]==1 ){ X <- rep(c(X),each=pl) d[1]<-pl } # Check if( is.null(ndimnames) ) ndimnames <- rep(list(NULL),length(dims)) if( length(ndimnames) != length(dims) ) stop("ndimnames and dims don't match: ",length(ndimnames)," ",length(dims)) # Define new structure if( addIndex==FALSE ) { if( d[1] != prod(dims)) stop("to.tensor: Not right number of elements (1)",d[1]," ",dims) dim(X) <- c(dims,d[-1]) dimnames(X) <- c(ndimnames,odimnames[-1]) } else { if( d[1] %% prod(dims)) stop("to.tensor: Not right number of elements (2)",d[1]," ",dims) dim(X) <- c(dims,"@"=d[1]/pl,d[-1]) if( is.character(addIndex) ) names(dim(X))[length(dims)+1] <- addIndex dimnames(X) <- c(ndimnames,list(NULL),odimnames[-1]) } # Return names(dimnames(X))<-names(dim(X)) if( length(dim(X)) == 2 ) class(X) <-c("tensor","matrix") else class(X) <- c("tensor") if( length(names(X))!=length(unique(names(X)))) warning("Tensor with duplicated names generated: ", paste(names(X),col=" ")) gsi.debugr(X) } ftable.tensor <- function(x,...) { n <- length(dim(x)) dn <- dimnames(x) nams <- names(dim(x)) if( is.null(nams) ) nams <- letters[1:n] if( is.null(dn) ) dn <- rep(list(NULL),n) dimnames(x) <- lapply(1:n,function(i) { if( is.null(dn[[i]]) ) gsi.stdnames(dim(x)[i],nams[i]) else dn[[i]] }) class(x) <- "table" ftable(x,...) 
} names.tensor <- function(x) { return(names(dim(x))) } "names<-.tensor" <- function(x,value) { if( length(value) != length(unique(value))) warning("Duplicated names assigned to tensor ",paste(value,col=" ")) dn <- dimnames(x) names(dim(x)) <- value # gueltig names(dn) <- value dimnames(x) <- dn x } "dim<-.tensor" <- function(x,value) { if( length(value) > 0 ) NextMethod() else if( length(x) > 1 ) stop("Not fitting dimension") else NextMethod(value=NULL) } "dimnames<-.tensor" <- function(x,value) { if( is.null(value) ) value <- rep(list(NULL),length(dim(x))) names(value) <- names(dim(x)) if( is.null(dim(x)) ) return(NULL) NextMethod(x,value) } dimnames.tensor <- function(x) { dn <- NextMethod("dimnames",x) if( is.null(dn) ) return( structure(rep(list(NULL),length(dim(x))),names=names(x)) ) else structure(dn,names=names(x)) } # Euklidische Norm des Tensors norm <- function(X,...) UseMethod("norm") norm.tensor <- function(X,i=NULL,...,by=NULL) { gsi.debugn("to.tensor$dimX=",dim(X),"i=",i," by=",by) if( missing(i) ) if(is.null(by)) return(sqrt(sum(abs(X)^2))) else i <- (1:level.tensor(X))[-toPos.tensor(X,by)] i <- toPos.tensor(X,i) sqrt(margin.tensor(abs(X)^2,i)) } opnorm <- function(X,...) UseMethod("opnorm") opnorm.tensor <- function(X,i=NULL,...,by=NULL) { svd.tensor(X,i=i,by=by,name="opnormdim")$d[[opnormdim=1]] } margin.tensor <- function(X,i=NULL,by=NULL) { i <- toPos.tensor(X,i,by=by) A <- gsi.matrify(X,i) to.tensor( c(rep(1,nrow(A)) %*% A),dim(X)[-i], dimnames(X)[-i]) } diagmul.tensor <- function(X,i=names(D),D,j=i,by=NULL) { gsi.debugn("diagmul.tensor$dim=",dim(X)," i=",i," j=",j," by"=by) if( !is.tensor(X) ) warning("X must be tensor in diagmul.tensor") if( !is.tensor(D) ) warning("D must be tensor in diagmul.tensor") if( is.tensor(i) || is.tensor(j) ) stop("Tensor provided as index in diagmul.tensor") if( is.null(by) ) byx<-byy<-c() else { byx <- toPos.tensor(X,by,missing.ok=TRUE) byy <- toPos.tensor(D,by,missing.ok=TRUE) by <- by[!is.na(byx)&!is.na(byy)] } ndims <- paste(":!",1:length(c(i,by)),sep="") odims <- names(X)[c(toPos.tensor(X,i),toPos.tensor(X,by))] names(X)[c(toPos.tensor(X,i),toPos.tensor(X,by))] <- ndims names(D)[c(toPos.tensor(D,j),toPos.tensor(D,by))] <- ndims erg <- mul.tensor(X,c(),D,c(),by=ndims) #print(dim(erg)) names(erg)[toPos.tensor(erg,ndims)]<-odims erg } #diagmul.tensor <- function(X,i=names(D),D,j=i,by=NULL) { # if( !is.tensor(X) ) X <- as.tensor(X) # if( !is.tensor(D) ) D <- as.tensor(D) # if( length(by) > 0 ) # xby <- (!is.na(toPos.tensor(X,by,missing.ok=TRUE)) & # !is.na(toPos.tensor(D,by,missing.ok=TRUE))) # else # xby<-numeric(0) # j <- toPos.tensor(D,c(j,by[xby])) # i <- toPos.tensor(X,c(i,by[xby])) # D <- reorder.tensor(D,j) # if( length(i)!=length(dim(D)) || any(dim(X)[i]!=dim(D)[j]) ) # stop("diagmul.tensor dimension mismatch",length(i)!=length(j),dim(X)[i]!=dim(D)[j]) # d <- dim(X) # dn <- dimnames(X) # gsi.unmatrify(gsi.matrify(X,i)*c(D),d,i,dn) #} is.tensor <- function(X) return( "tensor" %in% class(X) ) # Erzeugt alle Tensorpositionen als Matrix # in Speicherreihenfolge aus einer Dimension pos.tensor <- function(d) { if( is.list(d) ) d <- sapply(d,length) for(i in 1:length(d)){ if(i>1) E <- cbind(E[rep(1:dim(E)[1],d[i]),], rep(1:d[i],rep(dim(E)[1],d[i]))) else E <- cbind(1:d[i]) } colnames(E) <- names(d) E } # Sortiert die Tensorindices so um, dass # die in i genannten in dieser Reihenfolge zuerst kommen reorder.tensor <- function(x,i=NULL,...,by=NULL) { #cat(i) d <- dim(x) i <- toPos.tensor(x,i,by=by) i <- unique(c(i,1:length(d))) odimnames 
<- dimnames(x) ndim <- d[i] ndx <- c(1) weights<- gsi.weights(d) for(j in 1:length(d)) ndx <- rep(ndx,ndim[j])+ rep((1:ndim[j]-1)*weights[i[j]], rep(length(ndx),ndim[j])) to.tensor(x[ndx],ndim,ndimnames=odimnames[i]) } "^.tensor" <- function(x,y) { if( is.character(y) && is.tensor(x) ) { if( substr(y[1],1,1) == "$" ) { y[1] <- substr(y[1],2,10000) if( length( grep("[.]",y) ) > 0 ) i <- unlist(strsplit(y,"[.]")) else i <- unlist(strsplit(y,"")) } else i <- unlist(strsplit(y,"[.]")) names(x)[1:length(i)]<-i x } else if( length(y) == 0 ) { x } else NextMethod() } "$.tensor" <- renamefirst.tensor <- function(x,y) { if( is.character(y) && is.tensor(x) ) { if( length(grep("[.]",y))>0 ) i <- unlist(strsplit(y,"[.]")) else i <- unlist(strsplit(y,"")) names(x)[1:length(i)]<-ifelse(is.covariate(names(x)[1:length(i)]), i,contraname(i)) x } else if( length(y) == 0 ) { x } else NextMethod() } "|.tensor" <- function(x,y) { if( is.character(y) && is.tensor(x) ) { if( substr(y[1],1,1) == "$" ) { y[1] <- substr(y[1],2,10000) if( length( grep("[.]",y) ) > 0 ) i <- toPos.tensor(x,unlist(strsplit(y,"[.]"))) else i <- toPos.tensor(x,unlist(strsplit(y,""))) } else i <- toPos.tensor(x,unlist(strsplit(y,"[.]"))) reorder.tensor(x,i) } else if( length(y) == 0 ) { x } else NextMethod() } # Gibt die zur Umsortierung im Speicher noetige Indexreihen # - folge um die in i genannten Indices nach vorn zu holen reorder.tidx <- function(x,i,...){ d <- x i <- unique(c(i,1:length(d))) weights<- gsi.weights(d) ndim <- d[i] ndx <- c(1) for(j in 1:length(d)) ndx <- rep(ndx,ndim[j])+ rep((1:ndim[j]-1)*weights[i[j]], rep(length(ndx),ndim[j])) ndx } # Macht aus einer Reihe nach vorne zu holender Indices # die komplette umsortierung, so dass alle in ihrer neuen # Reihenfolge auftreten gsi.fullreorder <- function(d,first=NULL,last=NULL){ if( length(last)>0 ) unique(c(first,rev(unique(rev(c(1:length(d),last)))))) else unique(c(first,1:length(d))) } # # Wie gsi.fullreorder, nur dass die indices i nach hinten # geschoben werden # #gsi.fullreorder.anti <- function(d,i){ # i <- unique(i) # li<-length(i) # ld<-length(d) # if(li==ld) return(gsi.fullreorder(d,i)) # unique(c(i,1:ld))[c((li+1):ld,1:li)] #} gsi.weightedndx <- function(d,w){ ndx <- c(1) for(j in 1:length(d)) { ndx <- rep(ndx,d[j])+ rep( ((1:d[j]-1)*w[j]),rep(length(ndx),d[j]) ) } ndx } # erzeugt den multiplicator fuer jeden Arrayindex gsi.weights <- function(d){ cumprod(c(1,d[-length(d)])) } # Gibt den Vektor der uebrigen indices zurueck gsi.rest <- function(d,i){ if( length(i)==0 ) return(1:length(d)) unique(c(i,1:length(d)))[-(1:length(i))] } # invertiert eine Permutation gsi.invperm <- function(i){ if( length(i) == 0 ) return(1) i <- unique(c(i,1:max(i))) j <- numeric(length(i)) j[i]<-1:length(i) j } gsi.matrify <- function(X,i){ matrix(reorder.tensor(X,i),nrow=prod(dim(X)[i])) } gsi.unmatrify <- function(X,d,i,dn=NULL){ i <- gsi.fullreorder(d,i) ii<- gsi.invperm(i) dim(X) <- d[i] reorder.tensor(X,ii) } gsi.lefts <-function(X) { n <- length(dim(X)) if( n== 2 ) return(1) else c(1,3:((n/2)+1)) } gsi.rights <- function(X) { n <-length(dim(X)) if( n== 2 ) return(2) else (1:n)[-c(1,3:(n/2+1))] } gsi.without <- function(l,i) if(length(i)>0) l[-i] else l # # Collabiert zwei tensoren ueber die genannten Indices # i fuer X und Y fuer j # mul.tensor <- function(X,i=c(),Y,j=i,by=NULL){ gsi.debugn("mul.tensor$dimX=",dim(X)," i=",i," dimY=",dim(Y)," j=",j," by"=by) if( ! is.tensor(X) || ! 
is.tensor(Y) ) warning("Nontensors multiplied with mul.tensor") X <- as.tensor(X) Y <- as.tensor(Y) if( is.tensor(i) || is.tensor(j) ) stop("Tensor provided as index in mul.tensor") if(length(i)==0 && is.null(by)) { # "Outer Product" if( is.null(dim(X)) ) dim(X) <- length(X) if( is.null(dim(Y)) ) dim(Y) <- length(Y) E <- rep(c(X),length(Y))*rep(c(Y),rep(length(X),length(Y))) return( gsi.debugr(to.tensor(c(E),c(dim(X),dim(Y)),ndimnames=c(dimnames(X),dimnames(Y))))) } #if( missing(j)) # j <- i j <- toPos.tensor(Y,j) i <- toPos.tensor(X,i) if( ! is.null(by) ) { # parallization is only done when present in both and not used byi <- toPos.tensor(X,by,missing.ok=TRUE) byj <- toPos.tensor(Y,by,missing.ok=TRUE) parallel <- !is.na(byi) & !is.na(byj) & !(byi %in% i) & !(byj %in% j) byi <- byi[parallel] byj <- byj[parallel] } else { byi <- byj <- c() } dx <- dim(X) dy <- dim(Y) if(length(i)!=length(j) || any(dx[i]!=dy[j]) ) stop("mul.tensor: i incompatible to j") if(any(dx[byi]!=dy[byj]) ) stop("mul.tensor: by incompatible between tensors") rix <- gsi.fullreorder(dx,last=c(i,byi)) riy <- gsi.fullreorder(dy,j,last=byj) inner <- prod(dx[i]) para <- prod(dy[byj]) outerx <- prod(gsi.without(dx,c(i,byi))) outery <- prod(gsi.without(dy,c(j,byj))) dime <- c(gsi.without(dx,c(i,byi)),gsi.without(dy,c(j,byj)),dx[byi]) dnX <- dimnames(X) dnY <- dimnames(Y) ndim <- c(gsi.without(dnX,c(i,byi)),gsi.without(dnY,c(j,byj)),dnX[byi]) if( is.null(dnX) ) dnX <- rep(list(NULL),length(dim(X))) if( is.null(dnY) ) dnY <- rep(list(NULL),length(dim(Y))) xtidx <- reorder.tidx(dx,rix) ytidx <- reorder.tidx(dy,riy) dimx <- c(outerx,inner,para) if( is.complex(X) || is.complex(Y) ) { E <- .C(tensoraCmulhelper, dimx=as.integer(c(outerx,inner,para)), as.integer(c(inner,outery,para)), as.integer(c(outerx,outery,para)), as.complex(X[xtidx]), as.complex(Y[ytidx]), erg=complex(outerx*outery*para), NAOK=TRUE ## ,DUP=FALSE )$erg } else { E <- .C(tensoramulhelper, dimx=as.integer(c(outerx,inner,para)), as.integer(c(inner,outery,para)), as.integer(c(outerx,outery,para)), as.numeric(X[xtidx]), as.numeric(Y[ytidx]), erg=numeric(outerx*outery*para), NAOK=TRUE# ,DUP=FALSE )$erg #E <- matrix(X[xtidx],ncol=inner)%*% # matrix(Y[ytidx],nrow=inner) } return( gsi.debugr(to.tensor(c(E),dime,ndimnames=ndim))) } ## rep.tensor <- function(x,times,pos=1,name="i",...) 
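# rep.tensor: with a single `times` value the whole tensor is replicated and a
# counting dimension `name` of that size is inserted at position `pos`; a
# vector `times` repeats the individual slices of dimension `pos` instead.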
{ gsi.debugn("rep.tensor$dimX=",dim(x)," times=",times," pos=",pos," name=",name) if( length(times) > 1 || is.na(name)) { pos <- toPos.tensor(x,pos) x <- reorder.tensor(x,pos) m <- prod(dim(x)[-1]) ndimnames <- dimnames(x) ndimnames[[1]] <- rep(ndimnames[[1]],times) tmp <- to.tensor(rep(c(unclass(x)),rep(times,m)), c(gsi.namednumber(name,sum(times)),dim(x)[-1]), ndimnames=dimnames(x)) } else { tmp <- to.tensor(rep(c(unclass(x)),each=times), c(gsi.namednumber(name,times),dim(x)), ndimnames=c(gsi.namedlist(name,NULL),dimnames(x))) } if( !missing(pos) ) tmp <- reorder.tensor(tmp,gsi.invperm(pos)) gsi.debugr(tmp) } # # collabiert Tensor (spur bilden) ueber die Indices i,j # (auch listen sind erlaubt, # dann werden mehere paare collabiert) trace.tensor <- function(X,i,j) { gsi.debugn("trace.tensor$dimX=",X," i=",i," dimY=",dim(X)," j=",j," by"=by) i <- toPos.tensor(X,i) j <- toPos.tensor(X,j) il <- length(i) jl <- length(j) d <- dim(X) if( il!=jl ) stop("trace.tensor needs pairs to collaps") if( length(i)==0 ) return(X) if( is.null(d) ) stop("trace.tensor only works on tensors") if( length( unique(c(i,j)) )!=2*il ) stop("trace.tensor only allows different indices") if( any( d[i]!=d[j] )) stop("trace.tensor non conformable indices used") if( 2*il == 1 ) return(sum(X)) odimnames <- dimnames(X) weight <- gsi.weights(d) rest <- gsi.rest(d,c(i,j)) nweight <- c(weight[rest],weight[i]+weight[j]) dime <- d[rest] dimZ <- c(dime,d[i]) rep(1,prod(d[j])) collapsdim<- prod(d[j]) E <- matrix( X[gsi.weightedndx(d[c(rest,i)],nweight)], ncol=collapsdim)%*%rep(1,collapsdim) gsi.debugr(to.tensor(c(E),dime,ndimnames=odimnames[rest])) } delta.tensor <-function(d,mark="'",dn=NULL,by=NULL) { if( is.list(d) ) { ndimnames <- d d <- sapply(ndimnames,length) } else { ndimnames <- rep(list(NULL),length(d)) names(ndimnames) <- names(d) } if( !is.null(by) ) { by <- toPos.tensor(,by,mnames=names(d)) d2 <- d[by] d <- d[-by] ndimnames2 <-ndimnames[by] ndimnames <- ndimnames[-by] } else { d2 <- c() } X <- diag(prod(d)) if( is.null(by) ) gsi.debugr(to.tensor(c(X),c(d,mark(d,mark)),ndimnames=c(ndimnames,ndimnames))) else gsi.debugr(mul.tensor(to.tensor(c(X),c(d,mark(d,mark)),ndimnames=c(ndimnames,ndimnames)),c(),one.tensor(d2,ndimnames2))) } diag.tensor <- function(X,mark="'",dn=NULL,by=NULL) { # if( length(by) > 0 ) { by <- toPos.tensor(X,by,missing.ok=TRUE) by <- by[!is.na(by)] nby <- gsi.without(1:level.tensor(X),toPos.tensor(X,by)) dx <- dim(X) if( is.null(dn) ) dn <- dimnames(X) XX <- reorder.tensor(X,c(nby,by)) pnb <- prod(dx[nby]) pb <- prod(dx[by]) if( is.complex(X) ) tt <- complex(pnb^2*pb) else tt <- numeric(pnb^2*pb) tt[ rep( (1:pnb)*(1+pnb)-pnb , pb) + pnb^2*(rep((1:pb)-1,each=pnb)) ]<-c(XX) gsi.debugr(to.tensor(tt,c(dx[nby],mark(dx[nby],mark=mark),dx[by]), c(dn[nby],dn[nby],dn[by]))) # } else gsi.debugr(to.tensor(c(diag(c(X))),c(dim(X),mark(dim(X),mark)),if(is.null(dn)) c(dimnames(X),dimnames(mark(X,mark))) else c(dn,dn) )) } tripledelta.tensor <- function(d,mark1="'",mark2="*",dn=NULL) { p <- prod(d) tt <- rep(0,p^3) tt[1+(1+p+p^2)*(0:(p-1))]<-1 gsi.debugr(to.tensor(tt,c(d,mark(d,mark1),mark(d,mark2)),if(is.null(dn)) NULL else c(dn,dn,dn))) } one.tensor <- function(d=NULL,dn=NULL) { if( length( d )==0 ) return(1) gsi.debugr(to.tensor(rep(1,prod(d)),d,dn)) } mark <- function(X,mark,...) UseMethod("mark") mark.tensor <- function(X,mark="'",i=1:level.tensor(X),...,by=NULL) { i <- toPos.tensor(X,i) if( ! is.null(by) ) { not <- toPos.tensor(X,by) i <- i[! 
(i %in% by[!is.na(by)])] } nam <- names(X) nam[i] <- paste(nam[i],mark,sep="") names(X) <- nam gsi.debugr(X) } mark.numeric <- function(X,mark="'",i=1:length(X),...,by=NULL) { nam <- names(X) if( is.character(i) ) { oi<-i i <- charmatch(i,nam) if( any(is.na(i)) || any(i==0) ) stop("mark.numeric: No mach found for ", oi[is.na(oi)|i==0]) } if(! is.null(by) ) { if( is.character(by) ) by <- charmatch(by,nam) i <- i[!(i %in% by[!is.na(by)])] } nam[i] <- paste(nam[i],mark,sep="") names(X) <- nam X } mark.character <- function(X,mark="'",i=1:length(X),...,by=NULL) { nam <- X if( is.character(i) ) { oi<-i i <- charmatch(i,nam) if( any(is.na(i)) || any(i==0) ) stop("mark.character: No mach found for ", oi[is.na(oi)|i==0]) } if( !is.null(by) ) { if( is.character(by) ) by <- charmatch(by,nam) i <- i[!(i %in% by[!is.na(by)])] } nam[i] <- paste(nam[i],mark,sep="") nam } inv.tensor <- function(X,i,...,allowSingular=FALSE,eps=1E-10,by=NULL) { gsi.debug("inv.tensor$dimX=",dim(X)," i=",i) if( ! is.tensor(X) ) warning("Not a tensor given") if( is.tensor(i) ) stop("Tensor given as index") X <- as.tensor(X) i <- toPos.tensor(X,i) by <- toPos.tensor(X,by,missing.ok=TRUE) by <- by[!is.na(by)] j <- gsi.without(1:length(dim(X)),c(i,by)) dx <-dim(X) dn <-dimnames(X) XX <- reorder.tensor(X,c(i,j,by)) XX <- c(unclass(XX)) dim(XX) <- c(prod(dx[i]),prod(dx[j]),prod(dx[by])) if( allowSingular ) { XX <- unlist(lapply(as.list(1:dim(XX)[3]), function(i) { udv <- svd(XX[,,i]) if( abs(udv$d[1])^2 == 0 ) rank <- 0 else rank <- sum( abs(udv$d[1])*eps < abs(udv$d) ) # cat("The rank is ",rank,"\n") if( rank == 0 ) return(matrix(0,nrow=dim(XX)[1],ncol=dim(XX)[2])) Conj(udv$u[,1:rank,drop=FALSE] %*% ( 1/udv$d[1:rank] * t(Conj(udv$v[,1:rank,drop=FALSE])) )) } )) } else { XX <- unlist(lapply(as.list(1:dim(XX)[3]), function(i) { t(solve(XX[,,i])) } )) } gsi.debugr(to.tensor(c(XX),dx[c(i,j,by)],dn[c(i,j,by)])) } solve.tensor <- function(a,b,i,j=i,...,allowSingular=FALSE,eps=1E-10,by=NULL) { gsi.debug("mul.tensor$dima=",dim(a)," i=",i," dimb=",dim(b)," j=",j," by"=by) if( !is.tensor(a) || ! is.tensor(b) ) warning("Nontensors provided as tensors in solve.tensor") if( is.tensor(i) || is.tensor(j) ) stop("Tensor provided as index in solve.tensor") X <- as.tensor(a) b <- as.tensor(b) i <- toPos.tensor(X,i) j <- toPos.tensor(b,j) byi <-toPos.tensor(X,by,missing.ok=TRUE) byj<- toPos.tensor(b,by,missing.ok=TRUE) byNob <- by[!is.na(byi) & is.na(byj)] byBoth<- by[!is.na(byi)&!is.na(byj)] byi <- toPos.tensor(X,byBoth) byj <- toPos.tensor(b,byBoth) byi2<- toPos.tensor(X,byNob) k <- gsi.without(1:length(dim(X)),c(i,byi,byi2)) l <- gsi.without(1:length(dim(b)),c(j,byj)) dx <-dim(X) db <-dim(b) dnx <-dimnames(X) dnb <-dimnames(b) XX <- reorder.tensor(X,c(i,k,byi,byi2)) bb <- reorder.tensor(b,c(j,l,byj)) XX <- c(unclass(XX)) bb <- c(unclass(bb)) if( length(i)!=length(j) || any(dx[i]!=db[j] ) ) stop("Not fitting dimensions in solve.tensor da=",paste(dx[i]), " db=",paste(db[j])) if( ! 
allowSingular && ( length(k)!=length(i) || prod(dx[i])!=prod(dx[k])) ) stop("Not square tensor in solve.tensor da=",paste(dx[i]), " db=",paste(dx[k])) dim(XX) <- c(prod(dx[i]),prod(dx[k]),prod(dx[byi]),prod(dx[byi2])) dim(bb) <- c(prod(db[j]),prod(db[l]),prod(db[byj])) if( allowSingular ) { EE <- unlist(lapply(as.list(1:dim(XX)[3]), function(i) { unlist(lapply(as.list(1:dim(XX)[4]),function(j) { udv <- svd(XX[,,i,j]) if( abs(udv$d[1])^2 == 0 ) rank <- 0 else rank <- sum( abs(udv$d[1]*eps) < abs(udv$d) ) #cat("Eps is ",eps,"\n") #cat("The rank is ",rank,"\n") # ud t(C(v)) x = b # x = v 1/d t(C(u))b udv$v[,1:rank] %*% ( 1/udv$d[1:rank] * (t(Conj(udv$u[,1:rank])) %*% bb[,,i] )) }))} )) } else { EE <- unlist(lapply(as.list(1:dim(XX)[3]), function(i) { unlist(lapply(as.list(1:dim(XX)[4]),function(j) { solve(XX[,,i,j],bb[,,i]) }))} )) } d <- c(dx[c(k)],db[c(l,byj)],dx[byi2]) pn <- mapply(function(x,y) if(is.null(x)) y else x,dnb[byj],dnx[byi]) dn <- c(dnx[k],dnb[l],pn,dnx[byi2]) gsi.debugr(to.tensor(c(EE),d,dn)) } #solve.tensor <- function(a,b,i,j=i,...,by=c()){ # if( length(by) > 0 ) { # i <- toPos.tensor(X,i) # j <- toPos.tensor(b,j) # byi <- toPos.tensor(X,by,missing.ok=TRUE) # byj <- toPos.tensor(X,by,missing.ok=TRUE) # # # } # X <- to.tensor(a) # b <- to.tensor(b) # i <- toPos.tensor(X,i) # j <- toPos.tensor(X,j) # # dx <- dim(X) # db <- dim(b) # ri <- gsi.rest(dx,i) # rj <- gsi.rest(db,j) # X<-gsi.matrify(X,i) # b<-gsi.matrify(b,j) # x<-solve(X,b) # x <- to.tensor(c(x),c(dx[ri],db[rj]),c(dimnames(X)[ri],dimnames(b)[rj])) # x #} chol.tensor <- function(X,i,j,...,name="lambda") { gsi.debug("chol.tensor$dimX=",dim(X)," i=",i," j=",j) if( !is.tensor(X) ) warning("Nontensors provided as tensors in chol.tensor") if( is.tensor(i) || is.tensor(j) ) stop("Tensor provided as index in chol.tensor") X <- as.tensor(X) i <- toPos.tensor(X,i) j <- toPos.tensor(X,j) by <- gsi.without((1:length(dim(X))),c(i,j)) if( length(i)!=length(j) || any(dim(X)[i]!=dim(X)[j]) ) stop("No symmetry in chol") dx <-dim(X) dn <-dimnames(X) XX <- reorder.tensor(X,c(i,j,by)) XX <- c(unclass(XX)) dim(XX) <- c(prod(dx[i]),prod(dx[j]),prod(dx[by])) ld <- gsi.namednumber(name,prod(dx[j])) XX <- unlist(lapply(as.list(1:dim(XX)[3]), function(i,...) { chol(XX[,,i],...) } ,...)) return(gsi.debugr(to.tensor(XX,c(ld,dx[c(i,by)]),c(list(NULL),dn[c(i,by)])))) } #chol.tensor <- function(X,i,j,...,name="lambda") { # X <- as.tensor(X) # i <- toPos.tensor(X,i) # j <- toPos.tensor(X,j) # d <- dim(X) # dn <- dimnames(X) # if( !is.null(dn) ) # dn <- c(gsi.namedlist(name,NULL),dn[i]) # if( length(i)!=length(j) || # length(unique(c(i,j)))!=length(d) ) # stop("chol.tensor: i or j wrong") # if( any(d[i]!=d[j]) ) # stop("chol.tensor: tensor not square") # ii <- gsi.invperm(c(i,j)) # X <- reorder.tensor(X,c(i,j)) # X <- gsi.matrify(X,1:length(i)) # print(svd(X)) # X <- chol(X) # X <- to.tensor(c(X),c(dim(X)[1],d[i]),dn) # X #} level.tensor <- function(X,...) 
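# level.tensor: the level (order) of a tensor is its number of indices,
# i.e. 0 for a scalar, 1 for a dimensionless vector, otherwise length(dim(X))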
{ if( is.null(dim(X)) ) if( length(X) != 1 ) return(1) else return(0) return(length(dim(X))) } svd.tensor <- function(X,i,j=NULL,...,name="lambda",by=NULL) { gsi.debug("svd.tensor$dimX=",dim(X)," i=",i," j=",j," by"=by) if( !is.tensor(X) ) warning("Nontensors provided as tensors in svd.tensor") if( is.tensor(i) || is.tensor(j) ) stop("Tensor provided as index in svd.tensor") X <- as.tensor(X) i <- toPos.tensor(X,i) by <- toPos.tensor(X,by,missing.ok=TRUE) by <- by[!is.na(by)] if( is.null(j) ) j <- gsi.without((1:length(dim(X))),c(i,by)) else j <- toPos.tensor(X,j) if( missing(i) ) i <- gsi.without((1:length(dim(X))),c(j,by)) by <- gsi.without((1:length(dim(X))),c(i,j)) dx <- dim(X) dn<- dimnames(X) di <- dim(X)[i] din<- dimnames(X)[i] dj <- dim(X)[j] djn<- dimnames(X)[j] XX <- unclass(reorder.tensor(X,c(i,j,by))) dim(XX) <- c(prod(dx[i]),prod(dx[j]),prod(dx[by])) EE <- lapply(1:(dim(XX)[3]),function(i) { svd(XX[,,i]) },...) ld <- gsi.namednumber(name,min(prod(dx[i]),prod(dx[j]))) u <- to.tensor(unlist(lapply(EE,function(x) x$u)), c(dx[i],ld,dx[by]), c(dn[i],list(NULL),dn[by])) d <- to.tensor(unlist(lapply(EE,function(x) x$d)), c(ld,dx[by]), c(list(NULL),dn[by])) v <- to.tensor(unlist(lapply(EE,function(x) x$v)), c(dx[j],ld,dx[by]), c(dn[j],list(NULL),dn[by])) list(u=gsi.debugr(u), d=gsi.debugr(d), v=gsi.debugr(v)) } power.tensor <- function(X,i,j,p=0.5,by=NULL) { gsi.debug("power.tensor$dimX=",dim(X)," i=",i," j=",j," by"=by) if( !is.tensor(X) ) warning("Nontensors provided as tensors in power.tensor") if( is.tensor(i) || is.tensor(j) ) stop("Tensor provided as index in power.tensor") X <- as.tensor(X) udv <- svd.tensor(X,i,j,by=by,name="<<<") gsi.debugr(einstein.tensor(udv$u,diag=udv$d^p,udv$v,only="<<<",by=by)) } #power.tensor <- function(X,i,j,p=0.5) { # X <- as.tensor(X) # i <- toPos.tensor(X,i) # j <- toPos.tensor(X,j) # d <- dim(X) # if( length(i)!=length(j) || # length(unique(c(i,j)))!=length(d) ) # stop("root.tensor: i or j wrong") # if( any(d[i]!=d[j]) ) # stop("root.tensor: tensor not square") # ii <- gsi.invperm(c(i,j)) # udv <- svd.tensor(X,i,j) # k <- level.tensor(udv$u) # X <- mul.tensor(mul.tensor(diag(udv$d^p),1,udv$u,k),1,udv$v,k) # reorder.tensor(X,ii) #} to.matrix.tensor <- function(X,i,j,by=NULL) { if( !is.tensor(X) ) warning("Nontensors provided as tensors in to.matrix.tensor") if( (!missing(i) && is.tensor(i)) || (!missing(j) && is.tensor(j)) ) stop("Tensor provided as index in to.matrix.tensor") X <- as.tensor(X) d <- dim(X) dn <- dimnames(X) by = toPos.tensor(X,by,missing.ok=TRUE) by <- by[!is.na(by)] if( missing(i) ) { j <- toPos.tensor(X,j) i <- gsi.without((1:length(dim(X))),c(j,by)) } i <- toPos.tensor(X,i) if( missing(j) ) j <- gsi.without((1:length(dim(X))),c(i,by)) j <- toPos.tensor(X,j) structure(unclass(c(reorder.tensor(X,c(i,j,by)))), dim=c(i=prod(d[i]),j=prod(d[j]),d[by]), dimnames=c(list(NULL),list(NULL),dn[by]) ) } gsi.checkduplicate <- function(x) { value <- names(x) if( length(value) > length(unique(value)) ) { warning("Duplicated names in tensor",paste(value,col=" ")) TRUE } else FALSE } gsi.stdnames <- function(k,prefix="I",avoid=NULL) { if( k==0 ) character(0) else { tmp <- paste(prefix,1:k,sep="") if( !is.null(avoid) ) { while( any( a <- tmp %in% avoid ) ) { na <- sum(a) tmp <- c(tmp[!a],paste(prefix,(k+1):(k+na),sep="")) k <- k+na } } tmp } } gsi.namedlist <- function(nam,...) { tmp <- list(...) names(tmp) <- nam tmp } gsi.namednumber <- function(nam,...) { tmp <- c(...) 
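# attach the supplied dimension name(s) to the collected numeric value(s)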
names(tmp) <- nam tmp } untensor <- function(X,i=NULL,name=NULL,pos=1,by=NULL){ if( is.list(i) ) { if( !is.null(name)) names(i) <- name i <- lapply(i,function(i) names(X)[toPos.tensor(X,i)]) for(a in 1:length(i)) X <- untensor(X,i=i[[a]],names(i)[[a]],pos=1) return(X) } i <- toPos.tensor(X,i,by=by) d <- dim(X) r <- gsi.rest(d,i) X<- reorder.tensor(X,i) odimnames <- dimnames(X) k <- prod(d[i]) if( is.null(name)) names(k) <- gsi.stdnames(1,"I",avoid=names(d[-i])) else names(k) <- name dimnames(X) <- NULL dim(X) <- c(k,d[r]) dimnames(X) <- c(structure(list(gsi.untensornames(odimnames[i],d[i])),names=name),odimnames[-(1:length(i))]) if( pos!=1 ) X <- reorder.tensor(X,gsi.invperm(pos)) gsi.debugr(X) } gsi.untensornames <- function(X,d=sapply(X,length)) { if( is.null(X) ) return(NULL) if( length(X) == 0 ) return(NULL) wrong <- sapply(X,is.null) if( all(wrong) ) return(NULL) if( any(wrong ) ) X[wrong]<- lapply(d[wrong],seq) s<-"" for(i in 1:length(X) ) { s <- outer(s,X[[i]],paste,sep="") } return(c(s)) } as.tensor <- function(X,...) UseMethod("as.tensor") as.tensor.default <- function(X,...,dims=NULL) { # if( !is.numeric(X)) # stop("as.tensor.default: Only numeric tensors\ # supported") if( is.null(dims) ) gsi.debugr(to.tensor(X)) else gsi.debugr(to.tensor(c(X),dims)) } as.tensor.tensor <- function(X,...) {X} #renorm.rows <- function(X) { # X/c(sqrt(X^2%*%rep(1,dim(X)[2]))) #} #renorm.tensor<-function(X,i){ # if( missing(i) ) # return(to.tensor(c(X/norm.tensor(X)),dim(X),dimnames(X))) # i <- toPos.tensor(X,i) # d <- dim(X) # inorm <- 1/sqrt(mul.tensor(X^2,i,one.tensor(d[i]))) # diagmul.tensor(X,gsi.rest(dim(X),i),inorm,1:level.tensor(inorm)) #} slice.tensor <-function(X,i,what,drop=FALSE) { i <- toPos.tensor(X,i) if( length( i ) > 1 ) { stop("multiple slice not supported yet") } d <- dim(X) dn <- dimnames(X) dimnames(X) <- NULL if( is.character(what) ) { if(!is.null(dn) && !is.null(dn[[i]])) what <- match(what,dn[[i]]) else stop("slice.tensor: Missing names with named subscript") } if( any(what>d[i])) stop( "slice.tensor: subscript out of bound: ",what) if( any(what<1) ) { if( all(what<0) ) what <- 1:(d[i])[what] else stop( "slice.tensor: mixed positiv and negativ subscribts" ) } w <- gsi.weights(d) nd <- d nd[i]<-length(what) ndx <- c(1) for(j in 1:length(nd)) if( j!=i ) { ndx <- rep(ndx,nd[j])+ rep( ((1:nd[j]-1)*w[j]),rep(length(ndx),nd[j]) ) } else { ndx <- rep(ndx,nd[j])+ rep( ((what[1:nd[j]]-1)*w[j]), rep(length(ndx),nd[j]) ) } X <- X[ndx] if( !is.null(dn) && !is.null(dn[[i]]) ) dn[[i]] <- dn[[i]][what] if( drop && length(what) == 1 ) { nd <- nd[-i] if( ! is.null(dn) ) dn <- dn[-i] } dim(X)<-nd names(dn) <- names(nd) dimnames(X) <- dn gsi.debugr(X) } "[[.tensor" <- function(X,...,drop=TRUE) { namedargs <- list(...) 
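# without named subscripts, fall back to the ordinary list-style extraction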
if( is.null(names(namedargs)) ) return(NextMethod("[[",X)) for(n in names(namedargs)) { if( is.call(namedargs[[n]])) { k <- match(n,names(X)) if( is.na(k) ) stop("noexisting dimension in [[.tensor") names(X)[k]<- as.character(namedargs[[n]][[2]]) } else X <- slice.tensor(X,n,namedargs[[n]],drop=drop) } gsi.debugr(X) } "[.tensor" <- function(X,...,drop=TRUE) { # as.tensor.default(NextMethod("[",X)) dimnames(X) <- dimnames(X) r <- NextMethod("[",X) names(dim(r))<-names(dimnames(r)) as.tensor.default(r) } "[[<-.tensor" <- function(X,...,value) { U <- to.tensor(1:length(X),dim(X),dimnames(X)) U <- U[[...]] if( !is.null(dim(value)) && (length(dim(value))!=length(dim(U)) || any(dim(U)!=dim(value)))) warning("non matching arrays in [<-.tensor",paste(dim(U),col=","),":",paste(dim(value),col=",")) X[c(U)]<- c(value) gsi.debugr(X) } undrop.tensor <- function(A,name,pos=1) { dn <- c(dimnames(A),"!intern"=list(NULL)) dm <- c(dim(A),"!intern"=1) names(dn)[length(names(dn))]<-name names(dm)[length(names(dm))]<-name attr(A,"dimnames") <- NULL dim(A) <- dm dimnames(A) <- dn if( !missing(pos) ) { A <- reorder.tensor(A,2:pos) } A } #combineCF.tensor <- function(A,i,B,j) { # i <- toPos.tensor(A,i) # j <- toPos.tensor(B,j) # dA <- dim(A) # dB <- dim(B) # if( length(i) !=length(j) || any(dA[i]!=dB[j] )) # stop("combineCF: Dimensions don't match", paste(da[i]),":",paste(dB[j])) # A <- gsi.matrify(A,i) # B <- gsi.matrify(B,j) # nB <-dim(B)[2] # O <- matrix(0,nrow=nB,ncol=nB) # C <- rbind(cbind(A,B),cbind(t(B),O)) # C #} bind.tensor <- function(A,dA=NULL,B,dB=dA) { if( is.null(A) ) return(B) if( is.null(B) ) return(A) if( is.null(dA) ) { A <- undrop.tensor(A,"i") dA <- length(dim(A)) } dA <- toPos.tensor(A,dA) if( is.null(dB) ) { B <- undrop.tensor(B,"i") dB <- length(dim(B)) } dB <- toPos.tensor(B,dB) matching <- match(names(A)[-dA],names(B)[-dB]) if( !all(is.na(matching))) { if(any(is.na(matching))) { stop("wrong match in bind.tensor") } B <- reorder.tensor(B,c(dB,matching)) dB <- 1 } if( length(dB) != 1 || 1!=length(dA) || length(dim(B))!=length(dim(A)) || any(dim(B)[-dB]!=dim(A)[-dA]) ) { stop("bind.tensor dimensions don't match") } dAo <- dA dBo <- dB A <- reorder.tensor(A,(1:length(dim(A)))[-dA]) B <- reorder.tensor(B,(1:length(dim(B)))[-dB]) dA <- length(dim(A)) dB <- length(dim(A)) dn <- dimnames(A) dnB <- dimnames(dB)[[dB]] if(! is.null(dn[[dA]]) || ! is.null(dnB)) { if( is.null(dnB)) dnB <- gsi.stdnames(dim(B)[dB],"B") if( is.null(dn[[dA]])) dn[[dA]] <- gsi.stdnames(dim(A)[dA],"A") dn[[dA]] <- c(dn[[dA]],dnB) } dm <- dim(A) dm[dA] <- dm[dA]+dim(B)[dB] erg <- c(A,B) dim(erg) <- dm dimnames(erg) <- dn reorder.tensor(erg,c(gsi.vonbis(1,dAo-1),length(dA))) } gsi.vonbis <- function(a,b) { if( b>=a ) a:b else c() } toPos.tensor <- function(M,l=NULL,mnames=names(dim(M)),by=NULL,...,both=FALSE,missing.ok=FALSE) { if( is.null(l) && ! is.null(by) ) return( (1:length(mnames))[-toPos.tensor(M,by,mnames,both=both)]) if( length(l) == 0 ) return(numeric(0)) if( is.name(l) ) l <- as.character(l) if( is.character(l) ) { if( both ) e <- charmatch(as.covariate(l),as.covariate(mnames)) else e <- charmatch(l,mnames) if( any(is.na(e)) & !missing.ok ) stop("No match found for ",paste(l[is.na(e)],col=",")) if( any(e[!is.na(e)]==0) ) stop("Match not unique for ", paste(l[e==0],col=",") ) return(e); } else if( is.numeric(l) ) { return(l) } else { n <- sapply(l,is.character) if( any( n )) l[n]<-toPos.tensor(M,l=as.character(l[n]),names=mnames) return(l); } } einstein.tensor <- function(...,only=NULL,by=NULL) { ts <- list(...) 
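# degenerate cases: no argument yields NULL, a single argument is returned as is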
if( length(ts) == 0 ) return(NULL) if( length(ts) < 2 ) return(ts[[1]]) tmp <- NULL nams <- names(ts) if( is.null(nams) ) { nam <- rep("",length(ts)) } for(k in 1:length(ts)) { ten <- ts[[k]] nam <- nams[k] if( !is.null(nam) && nam=="diag" ) { if( !is.tensor(ten) ) warning("diagmul with nontensor in einstein.tensor") tmp <- diagmul.tensor(tmp,names(ten),ten,names(ten)) } else if( is.character(ten) ) { olds <- match(nam,names(tmp)) if( is.na(olds) ) stop("Unknown dimension ",ten," in einstein.tensor") if( ten %in% names(tmp) ) { tmp <- trace.tensor(tmp,nam,ten) } else { names(tmp)[olds]<-ten } } else if( is.null(tmp) ) { tmp <- ten } else if( is.null(ten)) { tmp <- tmp } else if( length(ten) == 1 && is.null(dim(ten))) { tmp <- tmp*ten } else if( length(tmp) == 1 && is.null(dim(tmp))) { tmp <- tmp*ten } else { n1 <- names(tmp) n2 <- names(ten) if( is.null(n1) || is.null(n2)) { tmp <- mul.tensor(tmp,c(),ten,c()) } else { jm <- match(n1,n2) i <- n1[!is.na(jm)] if( !missing(only) ) i <- i[i %in% only] if( !is.null(by) ) i <- i[! (i %in% by)] tmp<- mul.tensor(tmp,i,ten,i,by=by) } } } gsi.debugr(tmp) } "%e%" <- function(x,y) UseMethod("%e%") "%e%.tensor" <- function(x,y) einstein.tensor(x,y) "%r%" <- function(x,y) UseMethod("%r%") "%r%.tensor" <- function(x,y) riemann.tensor(x,y) #"%+%" <- function(x,y) UseMethod("%+%") "+.tensor" <- function(x,y) add.tensor(x,y) #"-" <- function(x,y) UseMethod("%-%") "-.tensor" <- function(x,y) { if( missing(y) ) { oc <- class(x) structure(-unclass(x),class=oc) } else add.tensor(x,y,"-") } "*.tensor" <- function(x,y) add.tensor(x,y,"*") "/.tensor" <- function(x,y) add.tensor(x,y,"/") add.tensor <- function(X,Y,op="+",only=NULL) { if( !is.tensor(X) ) if( length(X) == 1 ) return(gsi.debugr(to.tensor(c(do.call(op,list(unclass(X),unclass(Y)))), dim(Y),dimnames(Y)))) else stop("Tensor operation with nontensor") if( !is.tensor(Y)) if( length(Y) == 1) return(gsi.debugr(to.tensor(c(do.call(op,list(unclass(X),unclass(Y)))), dim(X),dimnames(X)))) else stop("Tensor operation with nontensors") wk <- names(X) %in% names(Y) if( !is.null(only) ) wk <- wk & (names(X) %in% only) nams <- names(X)[wk] #if( length(nams) < 1 ) # warning("No match in add.tensor",names(X),":",names(Y),":",only) i <- toPos.tensor(X,nams) j <- toPos.tensor(Y,nams) if( level.tensor(Y)>length(nams) ) { Xb <- mul.tensor(X,c(),one.tensor(gsi.without(dim(Y),j),gsi.without(dimnames(Y),j)),c()) # dimnames(Xb)[(length(dim(X))+1) : length(dim(Xb))]<-dimnames(Y)[-j] } else Xb <- X if( level.tensor(X)>length(nams) ) Yb <- mul.tensor(Y,c(),one.tensor(gsi.without(dim(X),i), gsi.without(dimnames(X),i)),c()) #to.tensor( unclass(one.tensor(dim(X)[-i])) %o% unclass(Y) ) else Yb <- Y Yb <- reorder.tensor(Yb,names(Xb)) gsi.debugr(to.tensor(c(do.call(op,list(unclass(Xb),unclass(Yb)))),dim(Xb),dimnames(Xb))) } #secondnames <- function(n,tag="'") { # paste(n,tag,sep="") #} #firstnames <- function(n,tag) { # substr(n,1,nchar(n)-nchar(tag)) #} contraname <- function(x) ifelse(substr(x,1,1)=="^",substr(x,2,100000),paste("^",x,sep="")) is.covariate <- function(x,...) UseMethod("is.covariate") is.covariate.tensor <- function(x,...) {is.covariate(names(dim(x)))} is.covariate.numeric <- function(x,...) {is.covariate(names(x))} is.covariate.character <- function(x,...) {substr(x,1,1)!="^"} as.covariate <- function(x,...) UseMethod("as.covariate") as.covariate.character <- function(x,...) ifelse(substr(x,1,1)=="^",substr(x,2,100000),x) is.contravariate <- function(x,...) 
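# a contravariate index is marked by a leading "^" in its name (cf. contraname)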
UseMethod("is.contravariate") is.contravariate.tensor <- function(x,...) {is.contravariate(names(x))} is.contravariate.numeric <- function(x,...) {is.contravariate(names(x))} is.contravariate.character <- function(x,...) {substr(x,1,1)=="^"} as.contravariate <- function(x,...) UseMethod("as.contravariate") as.contravariate.character<- function(x,...) ifelse(substr(x,1,1)=="^",x,paste("^",x,sep="")) drag.tensor <- function(x,g,d) { gsi.debug("drag.tensor$dimx=",dim(x)," d=",d," dimg=",dim(g)) cg <- is.covariate(g) if( (any(cg ) && ! all(cg)) || level.tensor(g)!= 2 ) stop("g must be either covariate or contravariate") gcov <- gcon <- g if( all(cg) ) gcon <- inv.tensor(g,1) else gcov <- inv.tensor(g,1) for(i in d) { na <- names(x)[toPos.tensor(x,i,both=TRUE)] xko <- is.covariate(na) if( xko ) { names(gcon) <- c(na,contraname(na)) x <- mul.tensor(x,na,gcon,na) } else { names(gcov) <- c(na,contraname(na)) x <- mul.tensor(x,na,gcov,na) } } gsi.debugr(x) } riemann.tensor <- function(...,only=NULL,by=NULL) { ts <- list(...) if( length(ts) == 0 ) return(NULL) if( length(ts) < 2 ) return(ts[[1]]) tmp <- NULL nams <- names(ts) if( is.null(nams) ) { nam <- rep("",length(ts)) } for(k in 1:length(ts)) { ten <- ts[[k]] nam <- nams[k] if( !is.null(nam) && nam=="diag" ) { tmp <- diagmul.tensor(tmp,contraname(names(ten)),ten,names(ten)) } else if( identical(is.character(ten),TRUE) ) { olds <- match(nam,names(tmp)) news <- match(contraname(ten),names(tmp)) if( is.na(olds) ) if( is.na(news) ) stop("Unknown dimension ",nam,"and",contraname(ten), " in riemann.tensor") else names(tmp)[news]<-contraname(nam) else if( is.na(news) ) names(tmp)[olds]<-ten else tmp <- trace.tensor(tmp,nam,contraname(ten)) } else if( is.null(tmp) ) { tmp <- ten } else if( is.null(ten)) { tmp <- tmp } else if( length(ten) == 1 && is.null(dim(ten))) { tmp <- tmp*ten } else if( length(tmp) == 1 && is.null(dim(tmp))) { tmp <- tmp*ten } else { n1 <- names(tmp) n2 <- names(ten) if( is.null(n1) || is.null(n2)) { tmp <- mul.tensor(tmp,c(),ten,c()) } else { jm <- match(contraname(n1),n2) i <- n1[!is.na(jm)] if( !missing(only) ) i <- i[i %in% only] if( !is.null(by) ) i<- i[! (i%in% by) & ! 
(contraname(i) %in% by)] tmp<- mul.tensor(tmp,i,ten,contraname(i)) } } } gsi.debugr(tmp) } mean.tensor <- function(x,along,...,na.rm=FALSE) { if( !na.rm && !all(is.finite(x)) ) stop("Missings in mean.tensor") N <- to.tensor(as.numeric(is.finite(x)),dim(x)) along <- toPos.tensor(x,along) one <-one.tensor(dim(x)[along]) x[!is.finite(x)]<- 0 S <- mul.tensor( x ,along , one , 1:level.tensor(one) ) n <- mul.tensor( N ,along , one , 1:level.tensor(one) ) S/n # to.tensor(c(S)/c(n),dim(S),dimnames(S)) } var.tensor <- function(x,y=NULL,...,along,by=NULL,na.rm=FALSE,mark="'") { if( is.null(y) ) { if( !na.rm && !all(is.finite(x)) ) stop("Missings in bar.tensor") Nx <- to.tensor(as.numeric(is.finite(x)),dim(x)) by <- toPos.tensor(x,by) by <- by[!is.na(by)] along <- toPos.tensor(x,along) one <-one.tensor(dim(x)[along]) x[!is.finite(x)]<- 0 S <- mul.tensor( x ,along , one , 1:level.tensor(one) ) n <- mul.tensor( Nx ,along , one , 1:level.tensor(one) ) #M <-one.tensor(dim(x)[along]) #S <- mul.tensor( x,along , N , 1:level.tensor(M) ) #d <- gsi.without(1:level.tensor(X),c(along,by)) x <- x - S/n # to.tensor(c(S)/c(n),dim(S),dimnames(S)) y <- mark(x,mark,by=c(along,by)) Ny<- mark(Nx,mark,by=c(along,by)) S2 <- mul.tensor( x,along, y,along,by=by) N2 <- mul.tensor(Nx,along,Ny,along,by=by) S2 / (N2-1) #to.tensor(c(S2)/(c(N2)-1),dim(S2),dimnames(S2)) } else { if( !na.rm && !all(is.finite(x)) ) stop("Missings in bar.tensor") Nx <- to.tensor(as.numeric(is.finite(x)),dim(x)) Ny <- to.tensor(as.numeric(is.finite(y)),dim(y)) byx <- toPos.tensor(x,by) byy <- toPos.tensor(y,by) by <- by[!is.na(byx)&!is.na(byy)] byx <- toPos.tensor(x,by) byy <- toPos.tensor(y,by) alongx <- toPos.tensor(x,along) alongy <- toPos.tensor(y,along) if( any(dim(x)[alongx]!=dim(y)[alongy]) ) stop("Data dimensions dont match in covariance of tensors") onex <-one.tensor(dim(x)[alongx]) oney <-one.tensor(dim(y)[alongy]) x[!is.finite(x)]<- 0 y[!is.finite(y)]<- 0 Sx <- mul.tensor( x ,along , onex , 1:level.tensor(onex) ) Sy <- mul.tensor( y ,along , oney , 1:level.tensor(oney) ) nx <- mul.tensor( Nx ,along , onex , 1:level.tensor(onex) ) ny <- mul.tensor( Ny ,along , oney , 1:level.tensor(oney) ) MeanX <- to.tensor(c(Sx)/c(nx),dim(Sx),dimnames(Sx)) MeanY <- to.tensor(c(Sy)/c(ny),dim(Sy),dimnames(Sy)) x <- x-MeanX # x <- x %-% MeanX y <- y-MeanY # y %-% MeanY S2 <- mul.tensor( x,along, y,along,by=by) N2 <- mul.tensor(Nx,along,Ny,along,by=by) S2/(N2-1)#to.tensor(c(S2)/(c(N2)-1),dim(S2),dimnames(S2)) } } tensorA/MD50000644000176200001440000000427613755572102012215 0ustar liggesusers8ca43cbc842c2336e835926c2166c28b *COPYING c06ebc0ca4c838972a2e796eecef3835 *DESCRIPTION d3d461c853ae03d3384227406cc69abf *NAMESPACE 3fdd832f3c450af97470ce66b7200fb0 *R/TensorAx.R ce0ee7cfc43d1c6f1cda3911dfba7b49 *man/addtensor.Rd cecb5cf56f84f7985e01b655e8953106 *man/astensor.Rd 2e9e74271ccdd039d340554a7f26aa61 *man/bindtensor.Rd d0beaa2f837763d715d5fe56629c2863 *man/choltensor.Rd 78c2c38b9021837ff9deabac63ad0e52 *man/deltatensor.Rd b0c681e52c68fbd6d3dc5d5458fa9275 *man/diagmul.Rd f783855f504f1097070c09f4aa5b1c3c *man/diagtensor.Rd 20f1af56e7868370bfa333566a459d1c *man/dragtensor.Rd cf24402d51c7e14e956fd7d03e897e74 *man/einstein.Rd f5a3921eb2ee01d504e54244ab9e78cc *man/ftabletensor.Rd 4ef203828c6725830c233e862fe2708e *man/invtensor.Rd a81fc045b21de6982dfdf396ba887b0f *man/istensor.Rd 28f102a654c2db5b2c3ed45cd9dc220a *man/leveltensor.Rd b57c210cbe6a4ba587b47a79b1eb76f0 *man/margin.Rd 35c9566bd669844203db9e629efa81a8 *man/marktensor.Rd e10739ee2ad6ae6e7cb4e836023939b6 
*man/meantensor.Rd 108a13c0a7974bc99c11dc401e7fcade *man/multensor.Rd 50c8e98a5776858dae33725cdd79e14f *man/names.Rd 3d1ea089129c051371de309fa4eb5dfc *man/normtensor.Rd a08de947c6d9f6a2ca27e3671cf51210 *man/onetensor.Rd 98564ca99865ab7139b55ff381f3717e *man/postensor.Rd bb7cedaead85cf18e5cf19c810007f6b *man/powertensor.Rd 46e24983372d60fe38d43ee036cd691d *man/reordertensor.Rd 6d0720088d440b7b8edf06e5009ccc49 *man/reptensor.Rd dade3b27d453170c0dfd1f2706109741 *man/riemann.Rd 9c42f7326d01edca697fa1409e5866c0 *man/sequencing.Rd 2c012109841bcca8c660770eedc57d41 *man/slicetensor.Rd 5f1501decc13d69499a22308003b6c1f *man/solvetensor.Rd 66ad3fabe56af8ae09b5368de200cd85 *man/svdtensor.Rd a414730d55019e8aee89d841248fa6bd *man/tensorA.package.Rd d3be54c767bab3096dafb6c05a278c71 *man/toPos.Rd 4df2ac5be1de1e38fad6715a1c64e525 *man/tomatrixtensor.Rd cea160b94ab67562abf52a1b1e6b24db *man/totensor.Rd eaa435e2e93c20b9d0ef58d2a5347412 *man/tracetensor.Rd 250d1249cba40618dcbc02ea0d2c6c60 *man/tripledelta.Rd c0171fd359e54ad2c58fa43c941842cc *man/undroptensor.Rd eec293baef123b329cceecb180bfb3cf *man/untensor.Rd 61f1b99298d9b6b98bb9f978208d0b9e *src/tensora.c 15ca7224c5c5ea23fe5f1de288237edc *tests/checker.R 081df080601661722d1a5b23c48ae228 *tests/examples.R