AMORE/0000755000175100001440000000000011352512213011126 5ustar hornikusersAMORE/DESCRIPTION0000755000175100001440000000155511352605073012654 0ustar hornikusersEncoding: UTF-8 Package: AMORE Version: 0.2-12 Date: 2010-03-25 Title: A MORE flexible neural network package Author: Manuel Castejón Limas, Joaquín B. Ordieres Meré, Ana González Marcos, Francisco Javier Martínez de Pisón Ascacibar, Alpha V. Pernía Espinoza, Fernando Alba Elías Maintainer: Manuel Castejón Limas Description: This package was born to release the TAO robust neural network algorithm to the R users. It has grown and I think it can be of interest for the users wanting to implement their own training algorithms as well as for those others whose needs lye only in the "user space". License: GPL (>= 2) URL: http://rwiki.sciviews.org/doku.php?id=packages:cran:amore LazyLoad: yes Packaged: 2010-03-24 22:39:07 UTC; mcasl Repository: CRAN Date/Publication: 2010-03-25 07:01:15 AMORE/man/0000755000175100001440000000000011352605073011710 5ustar hornikusersAMORE/man/ADAPTgd.MLPnet.Rd0000644000175100001440000000216311352510403014473 0ustar hornikusers\name{ADAPTgd.MLPnet} \alias{ADAPTgd.MLPnet} \title{Adaptative gradient descent training} \description{Adaptative gradient descent training method.} \usage{ ADAPTgd.MLPnet(net,P, T,n.epochs) } \arguments{ \item{net}{Neural Network to train.} \item{P}{Input data set.} \item{T}{Target output data set.} \item{n.epochs}{Number of epochs to train} } \value{This function returns a neural network object modified according to the input and target data set.} \author{ Manuel Castejón Limas. \email{manuel.castejon@unileon.es}\cr Joaquin Ordieres Meré. \email{j.ordieres@upm.es}\cr Ana González Marcos. \email{ana.gonzalez@unirioja.es} \cr Alpha V. Pernía Espinoza. \email{alpha.pernia@unirioja.es}\cr Francisco Javier Martinez de Pisón. \email{fjmartin@unirioja.es}\cr Fernando Alba Elías. \email{fernando.alba@unavarra.es}\cr } \references{ Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr } \seealso{ \code{\link{newff},\link{train},\link{ADAPTgdwm.MLPnet}} } \keyword{neural} AMORE/man/ADAPTgdwm.MLPnet.Rd0000644000175100001440000000222311352510403015034 0ustar hornikusers\name{ADAPTgdwm.MLPnet} \alias{ADAPTgdwm.MLPnet} \title{Adaptative gradient descent with momentum training} \description{Adaptative gradient descent with momentum training method.} \usage{ ADAPTgdwm.MLPnet(net,P, T,n.epochs) } \arguments{ \item{net}{Neural Network to train.} \item{P}{Input data set.} \item{T}{Target output data set.} \item{n.epochs}{Number of epochs to train} } \value{This function returns a neural network object modified according to the input and target data set.} \author{ Manuel Castejón Limas. \email{manuel.castejon@unileon.es}\cr Joaquin Ordieres Meré. \email{j.ordieres@upm.es}\cr Ana González Marcos. \email{ana.gonzalez@unirioja.es} \cr Alpha V. Pernía Espinoza. \email{alpha.pernia@unirioja.es}\cr Francisco Javier Martinez de Pisón. \email{fjmartin@unirioja.es}\cr Fernando Alba Elías. \email{fernando.alba@unavarra.es}\cr } \references{ Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. 
\cr \cr } \seealso{ \code{\link{newff},\link{train},\link{ADAPTgd.MLPnet}} } \keyword{neural} AMORE/man/BATCHgd.MLPnet.Rd0000644000175100001440000000220011352510403014453 0ustar hornikusers\name{BATCHgd.MLPnet} \alias{BATCHgd.MLPnet} \title{Batch gradient descent training} \description{Modifies the neural network weights and biases according to the training set.} \usage{ BATCHgd.MLPnet(net,P,T,n.epochs) } \arguments{ \item{net}{Neural Network to train.} \item{P}{Input data set.} \item{T}{Target output data set.} \item{n.epochs}{Number of epochs to train} } \value{This function returns a neural network object modified according to the chosen data.} \author{ Manuel Castejón Limas. \email{manuel.castejon@unileon.es}\cr Joaquin Ordieres Meré. \email{j.ordieres@upm.es}\cr Ana González Marcos. \email{ana.gonzalez@unirioja.es} \cr Alpha V. Pernía Espinoza. \email{alpha.pernia@unirioja.es}\cr Francisco Javier Martinez de Pisón. \email{fjmartin@unirioja.es}\cr Fernando Alba Elías. \email{fernando.alba@unavarra.es}\cr } \references{ Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr } \seealso{ \code{\link{newff},\link{train},\link{BATCHgdwm.MLPnet}} } \keyword{neural} AMORE/man/BATCHgdwm.MLPnet.Rd0000644000175100001440000000222411352510403015025 0ustar hornikusers\name{BATCHgdwm.MLPnet} \alias{BATCHgdwm.MLPnet} \title{Batch gradient descent with momentum training} \description{Modifies the neural network weights and biases according to the training set.} \usage{ BATCHgdwm.MLPnet(net,P,T, n.epochs) } \arguments{ \item{net}{Neural Network to train.} \item{P}{Input data set.} \item{T}{Target output data set.} \item{n.epochs}{Number of epochs to train} } \value{This functions returns a neural network object modified according to the chosen data.} \author{ Manuel Castejón Limas. \email{manuel.castejon@unileon.es}\cr Joaquin Ordieres Meré. \email{j.ordieres@upm.es}\cr Ana González Marcos. \email{ana.gonzalez@unirioja.es} \cr Alpha V. Pernía Espinoza. \email{alpha.pernia@unirioja.es}\cr Francisco Javier Martinez de Pisón. \email{fjmartin@unirioja.es}\cr Fernando Alba Elías. \email{fernando.alba@unavarra.es}\cr } \references{ Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr } \seealso{ \code{\link{newff},\link{train},\link{BATCHgd.MLPnet}} } \keyword{neural} AMORE/man/deltaE.Rd0000644000175100001440000000372611352512143013400 0ustar hornikusers\name{error.LMS} \alias{error.LMS} \alias{error.LMLS} \alias{error.TAO} \alias{deltaE.LMS} \alias{deltaE.LMLS} \alias{deltaE.TAO} \title{Neural network training error criteria.} \description{The error functions calculate the goodness of fit of a neural network according to certain criterium: \itemize{ \item LMS: Least Mean Squares Error. \item LMLS: Least Mean Log Squares minimization. \item TAO: TAO error minimization. } The deltaE functions calculate the influence functions of their error criteria. } \usage{ error.LMS(arguments) error.LMLS(arguments) error.TAO(arguments) deltaE.LMS(arguments) deltaE.LMLS(arguments) deltaE.TAO(arguments) } \arguments{ \item{arguments}{List of arguments to pass to the functions. \itemize{ \item The first element is the prediction of the neuron. \item The second element is the corresponding component of the target vector. \item The third element is the whole net. 
This allows the TAO criterium to know the value of the S parameter and eventually ( next minor update) will allow the user to apply regularization criteria.} } } \value{This functions return the error and influence function criteria.} \author{ Manuel Castejón Limas. \email{manuel.castejon@unileon.es}\cr Joaquin Ordieres Meré. \email{j.ordieres@upm.es}\cr Ana González Marcos. \email{ana.gonzalez@unirioja.es} \cr Alpha V. Pernía Espinoza. \email{alpha.pernia@unirioja.es}\cr Francisco Javier Martinez de Pisón. \email{fjmartin@unirioja.es}\cr Fernando Alba Elías. \email{fernando.alba@unavarra.es}\cr } \references{ Pernía Espinoza, A.V., Ordieres Meré, J.B., Martínez de Pisón, F.J., González Marcos, A. TAO-robust backpropagation learning algorithm. Neural Networks. Vol. 18, Issue 2, pp. 191--204, 2005.\cr \cr Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr } \seealso{ \code{\link{train}} } \keyword{neural} AMORE/man/graphviz.MLPnet.Rd0000644000175100001440000000217711352510403015166 0ustar hornikusers\name{graphviz.MLPnet} \alias{graphviz.MLPnet} \title{Neural network graphic representation} \description{Creates a dot file, suitable to be processed with graphviz, containing a graphical representation of the netwok topology and some numerical information about the network parameters.} \usage{ graphviz.MLPnet(net, filename, digits) } \arguments{ \item{net}{Neural Network.} \item{filename}{Name of the dot file to be written.} \item{digits}{Number of digits used to round the parameters.} } \value{This function writes a file suitable to be postprocessed with the graphviz package. Thus, multiple formats can be obtained: ps, pdf, ...} \author{ Manuel Castejón Limas. \email{manuel.castejon@unileon.es}\cr Joaquin Ordieres Meré. \email{j.ordieres@upm.es}\cr Ana González Marcos. \email{ana.gonzalez@unirioja.es} \cr Alpha V. Pernía Espinoza. \email{alpha.pernia@unirioja.es}\cr Francisco Javier Martinez de Pisón. \email{fjmartin@unirioja.es}\cr Fernando Alba Elías. \email{fernando.alba@unavarra.es}\cr } \references{ http:\/\/www.graphviz.org \cr \cr } \keyword{neural} AMORE/man/init.MLPneuron.Rd0000644000175100001440000001200411352511372015013 0ustar hornikusers\name{init.MLPneuron} \alias{init.MLPneuron} \title{Neuron constructor.} \description{Creates a neuron according to the structure established by the AMORE package standard.} \usage{init.MLPneuron(id, type, activation.function, output.links, output.aims, input.links, weights, bias, method, method.dep.variables)} \arguments{ \item{id}{Numerical index of the neuron (so as to be refered in a network operation).} \item{type}{Either hidden or ouput, according to the layer the neuron belongs to.} \item{activation.function}{The name of the characteristic function of the neuron. It can be "pureline", "tansig", "sigmoid" or even "custom" in case that the user wants to configure its own activation function accordingly defining f0 and f1.} \item{output.links}{The id's of the neurons that accept the output value of this neuron as an input.} \item{output.aims}{The location of the output of the neuron in the input set of the addressed neuron. Gives answer to: Is this output the first, the second, the third, ..., input at the addressed neuron?. Similarly for an output neuron: Is this output the first, the second, the third, ..., element of the output vector?} \item{input.links}{The id's of the neurons whose outputs work as inputs for this neuron. 
Positive values represent that we take the outputs of other neurons as inputs. Negative values represent the coordinates of the input vector to be considered as inputs.} \item{weights}{The multiplying factors of the input values.} \item{bias}{The bias summed to the weighted sum of the inputs.} \item{method}{Prefered training method. Currently it can be: \itemize{ \item "ADAPTgd": Adaptative gradient descend. \item "ADAPTgdwm": Adaptative gradient descend with momentum. \item "BATCHgd": BATCH gradient descend. \item "BATCHgdwm": BATCH gradient descend with momentum. } } \item{method.dep.variables}{Variables used by the training methods: \itemize{ \item ADAPTgd method: \itemize{ \item delta: Used in the backpropagation method. \item learning.rate: Learning rate parameter. Notice that we can use a different rate for each neuron. } \item ADAPTgdwm method: \itemize{ \item delta: Used in the backpropagation method. \item learning.rate: Learning rate parameter. Notice that we can use a different rate for each neuron. \item momentum: Momentum constant used in the backpropagation with momentum learning criterium. \item former.weight.change: Last increment in the weight parameters. Used by the momentum training technique. \item former.bias.change: Last increment in the bias parameter. Used by the momentum training technique. } \item BATCHgd method: \itemize{ \item delta: Used in the backpropagation method. \item learning.rate: Learning rate parameter. Notice that we can use a different rate for each neuron. \item sum.delta.x: Used as an acumulator of the changes to apply to the weight parameters in the batch training. \item sum.delta.bias: Used as an acumulator of the changes to apply to the bias parameters in the batch training. } \item BATCHgdwm method: \itemize{ \item delta: Used in the backpropagation method. \item learning.rate: Learning rate parameter. Notice that we can use a different rate for each neuron. \item sum.delta.x: Used as an acumulator of the changes to apply to the weight parameters in the batch training. \item sum.delta.bias: Used as an acumulator of the changes to apply to the bias parameters in the batch training. \item momentum: Momentum constant used in the backpropagation with momentum learning criterium. \item former.weight.change: Last increment in the weight parameters. Used by the momentum training technique. \item former.bias.change: Last increment in the bias parameter. Used by the momentum training technique. } }} } \value{\emph{init.MLPneuron} returns a single neuron. Mainly used to create a neural network object.} \author{ Manuel Castejón Limas. \email{manuel.castejon@unileon.es}\cr Joaquin Ordieres Meré. \email{j.ordieres@upm.es}\cr Ana González Marcos. \email{ana.gonzalez@unirioja.es} \cr Alpha V. Pernía Espinoza. \email{alpha.pernia@unirioja.es}\cr Francisco Javier Martinez de Pisón. \email{fjmartin@unirioja.es}\cr Fernando Alba Elías. 
\email{fernando.alba@unavarra.es}\cr } \seealso{ \code{\link{newff}}, \code{\link{random.init.MLPnet}}, \code{\link{random.init.MLPneuron}}, \code{\link{select.activation.function}} , \code{\link{init.MLPneuron}} } \keyword{neural} AMORE/man/newff.Rd0000644000175100001440000000675511352510403013311 0ustar hornikusers\name{newff} \alias{newff} \title{Create a Multilayer Feedforward Neural Network} \description{Creates a feedforward artificial neural network according to the structure established by the AMORE package standard.} \usage{newff(n.neurons, learning.rate.global, momentum.global, error.criterium, Stao, hidden.layer, output.layer, method) } \arguments{ \item{n.neurons}{Numeric vector containing the number of neurons of each layer. The first element of the vector is the number of input neurons, the last is the number of output neurons and the rest are the number of neuron of the different hidden layers.} \item{learning.rate.global}{Learning rate at which every neuron is trained.} \item{momentum.global}{Momentum for every neuron. Needed by several training methods.} \item{error.criterium}{Criterium used to measure to proximity of the neural network prediction to its target. Currently we can choose amongst: \itemize{ \item "LMS": Least Mean Squares. \item "LMLS": Least Mean Logarithm Squared (Liano 1996). \item "TAO": TAO Error (Pernia, 2004). }} \item{Stao}{Stao parameter for the TAO error criterium. Unused by the rest of criteria.} \item{hidden.layer}{Activation function of the hidden layer neurons. Available functions are: \itemize{ \item "purelin". \item "tansig". \item "sigmoid". \item "hardlim". \item "custom": The user must manually define the f0 and f1 elements of the neurons. }} \item{output.layer}{Activation function of the hidden layer neurons according to the former list shown above.} \item{method}{Prefered training method. Currently it can be: \itemize{ \item "ADAPTgd": Adaptative gradient descend. \item "ADAPTgdwm": Adaptative gradient descend with momentum. \item "BATCHgd": BATCH gradient descend. \item "BATCHgdwm": BATCH gradient descend with momentum. }} } \value{\emph{newff} returns a multilayer feedforward neural network object.} \author{ Manuel Castejón Limas. \email{manuel.castejon@unileon.es}\cr Joaquin Ordieres Meré. Ana González Marcos. Alpha V. Pernía Espinoza. Eliseo P. Vergara Gonzalez. Francisco Javier Martinez de Pisón. Fernando Alba Elías. } \references{ Pernía Espinoza, A.V., Ordieres Meré, J.B., Martínez de Pisón, F.J., González Marcos, A. TAO-robust backpropagation learning algorithm. Neural Networks. Vol. 18, Issue 2, pp. 191--204, 2005.\cr \cr Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr } \seealso{ \code{\link{init.MLPneuron}}, \code{\link{random.init.MLPnet}}, \code{\link{random.init.MLPneuron}}, \code{\link{select.activation.function}} } \examples{ #Example 1 library(AMORE) # P is the input vector P <- matrix(sample(seq(-1,1,length=1000), 1000, replace=FALSE), ncol=1) # The network will try to approximate the target P^2 target <- P^2 # We create a feedforward network, with two hidden layers. # The first hidden layer has three neurons and the second has two neurons. # The hidden layers have got Tansig activation functions and the output layer is Purelin. 
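# As documented in the arguments above: Stao is only used by the "TAO" error criterium
# (it may stay NA otherwise), and momentum.global is only consumed by the
# "ADAPTgdwm"/"BATCHgdwm" training methods.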
net <- newff(n.neurons=c(1,3,2,1), learning.rate.global=1e-2, momentum.global=0.5, error.criterium="LMS", Stao=NA, hidden.layer="tansig", output.layer="purelin", method="ADAPTgdwm") result <- train(net, P, target, error.criterium="LMS", report=TRUE, show.step=100, n.shows=5 ) y <- sim(result$net, P) plot(P,y, col="blue", pch="+") points(P,target, col="red", pch="x") } \keyword{neural} AMORE/man/random.init.MLPnet.Rd0000644000175100001440000000202111352510403015542 0ustar hornikusers\name{random.init.MLPnet} \alias{random.init.MLPnet} \title{Initialize the network with random weigths and biases.} \description{Provides random values to the network weights and biases so as to start with. Basically it applies the random.init.MLPneuron function to every neuron in the network.} \usage{random.init.MLPnet(net) } \arguments{ \item{net}{The neural network object} } \value{\emph{random.init.MLPnet} returns the input network with weights and biases changed randomly.} \author{ Manuel Castejón Limas. \email{manuel.castejon@unileon.es}\cr Joaquin Ordieres Meré. \email{j.ordieres@upm.es}\cr Ana González Marcos. \email{ana.gonzalez@unirioja.es} \cr Alpha V. Pernía Espinoza. \email{alpha.pernia@unirioja.es}\cr Francisco Javier Martinez de Pisón. \email{fjmartin@unirioja.es}\cr Fernando Alba Elías. \email{fernando.alba@unavarra.es}\cr } \seealso{ \code{\link{random.init.MLPneuron}}, \code{\link{init.MLPneuron}}, \code{\link{newff}} } \keyword{neural} AMORE/man/random.init.MLPneuron.Rd0000644000175100001440000000265011352510403016272 0ustar hornikusers\name{random.init.MLPneuron} \alias{random.init.MLPneuron} \title{Initialize the neuron with random weigths and bias.} \description{Provides random values to the neuron weights and bias so as to start with. It is usually called by the random.init.NeuralNet function during the construction of the neural object by the \emph{newff} function. } \details{The values are assigned according to the suggestions of \cite{Haykin}.} \usage{random.init.MLPneuron(net.number.weights, neuron) } \arguments{ \item{net.number.weights}{Number of bias and weight parameters of the neural network the neuron belongs to.} \item{neuron}{The neuron object.} } \value{\emph{random.init.MLPneuron} returns the input neuron with bias and weights changed randomly.} \author{ Manuel Castejón Limas. \email{manuel.castejon@unileon.es}\cr Joaquin Ordieres Meré. \email{j.ordieres@upm.es}\cr Ana González Marcos. \email{ana.gonzalez@unirioja.es} \cr Alpha V. Pernía Espinoza. \email{alpha.pernia@unirioja.es}\cr Francisco Javier Martinez de Pisón. \email{fjmartin@unirioja.es}\cr Fernando Alba Elías. \email{fernando.alba@unavarra.es}\cr } \seealso{ \code{\link{random.init.MLPnet}}, \code{\link{init.MLPneuron}}, \code{\link{newff}} } \references{ Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr } \keyword{neural} AMORE/man/select.activation.function.Rd0000644000175100001440000000267411352510403017443 0ustar hornikusers\name{select.activation.function} \alias{select.activation.function} \title{Provides R code of the selected activation function.} \description{Provides random values to the neuron weights and bias so as to start with. It is usually called by the random.init.NeuralNet function during the construction of the neural object by the \emph{newff} function. } \usage{ select.activation.function(activation.function)} \arguments{ \item{activation.function}{ Activation function name. 
Currently the user may choose amongst \emph{purelin}, \emph{tansig}, \emph{sigmoid}, \emph{hardlim} and \emph{custom}. If \emph{custom} is chosen the the user must manually assign the neuron \emph{f0} and \emph{f1} functions.} } \value{\emph{select.activation.function} returns a list with two elements. The first, \emph{f0} is the R code selected to serve as the neuron activation function. The second, \emph{f1} is the R code of the activation function derivative.} \author{ Manuel Castejón Limas. \email{manuel.castejon@unileon.es}\cr Joaquin Ordieres Meré. \email{j.ordieres@upm.es}\cr Ana González Marcos. \email{ana.gonzalez@unirioja.es} \cr Alpha V. Pernía Espinoza. \email{alpha.pernia@unirioja.es}\cr Francisco Javier Martinez de Pisón. \email{fjmartin@unirioja.es}\cr Fernando Alba Elías. \email{fernando.alba@unavarra.es}\cr } \seealso{ \code{\link{init.MLPneuron}}, \code{\link{newff}} } \keyword{neural} AMORE/man/sim.MLPnet.Rd0000644000175100001440000000247011352510403014120 0ustar hornikusers\name{sim.MLPnet} \alias{sim} \alias{sim.MLPnet} \title{Performs the simulation of a neural network from an input data set.} \description{This function calculates the output values of the neural network for a given data set. Various versions are provided according to different degrees of C code conversion. The \emph{sim.MLPnet} function is the latest and quickest.} \usage{ sim(net,P,...) sim.MLPnet(net,P,...) } \arguments{ \item{...}{Currently, the parameters below are accepted.} \item{net}{Neural Network to simulate.} \item{P}{Data Set input values.} } \value{This function returns a matrix containing the output values of the neural network for the given data set.} \author{ Manuel Castejón Limas. \email{manuel.castejon@unileon.es}\cr Joaquin Ordieres Meré \email{j.ordieres@upm.es}\cr Ana González Marcos. \email{ana.gonzalez@unirioja.es} \cr Alpha V. Pernía Espinoza. \email{alpha.pernia@unirioja.es}\cr Francisco Javier Martinez de Pisón \email{fjmartin@unirioja.es}\cr Fernando Alba Elías. \email{fernando.alba@unavarra.es}\cr } \references{ Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr } \seealso{ \code{\link{newff},\link{train}} } \keyword{neural} AMORE/man/taofun.Rd0000644000175100001440000000245011352512022013463 0ustar hornikusers\name{error.TAO} \alias{hfun} \alias{phifun} \alias{dphifun} \title{TAO robust error criterium auxiliar functions.} \description{Auxiliar functions. Not meant to be called from the user but from the \code{\link{error.TAO}} and the \code{\link{deltaE.TAO}} functions. } \usage{ hfun(v,k) phifun(v,k) dphifun(v,k) } \arguments{ \item{v}{Input value.} \item{k}{Threshold limit.} } \value{These functions return a numeric array with dimension equal to the dimension of v.} \author{ Manuel Castejón Limas. \email{manuel.castejon@unileon.es}\cr Joaquin Ordieres Meré. \email{j.ordieres@upm.es}\cr Ana González Marcos. \email{ana.gonzalez@unirioja.es} \cr Alpha V. Pernía Espinoza. \email{alpha.pernia@unirioja.es}\cr Francisco Javier Martinez de Pisón. \email{fjmartin@unirioja.es}\cr Fernando Alba Elías. \email{fernando.alba@unavarra.es}\cr } \references{ Pernía Espinoza, A.V., Ordieres Meré, J.B., Martínez de Pisón, F.J., González Marcos, A. TAO-robust backpropagation learning algorithm. Neural Networks. Vol. 18, Issue 2, pp. 191--204, 2005.\cr \cr Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. 
\cr \cr } \seealso{ \code{\link{train}} } \keyword{neural} AMORE/man/train.Rd0000644000175100001440000000477311352510403013317 0ustar hornikusers\name{train} \alias{train} \title{Neural network training function.} \description{For a given data set (training set), this function modifies the neural network weights and biases to approximate the relationships amongst variables present in the training set. These may serve to satisfy several needs, i.e. fitting non-linear functions.} \usage{ train(net, P, T, Pval=NULL, Tval=NULL, error.criterium="LMS", report=TRUE, n.shows, show.step, Stao=NA,prob=NULL) } \arguments{ \item{net}{Neural Network to train.} \item{P}{Training set input values.} \item{T}{Training set output values} \item{Pval}{Validation set input values for optional early stopping.} \item{Tval}{Validation set output values for optional early stopping.} \item{error.criterium}{Criterium used to measure the goodness of fit:"LMS", "LMLS", "TAO".} \item{Stao}{Initial value of the S parameter used by the TAO algorithm.} \item{report}{Logical value indicating whether the training function should keep quiet or should provide graphical/written information during the training process instead.} \item{n.shows}{Number of times to report (if report is TRUE). The total number of training epochs is n.shows times show.step.} \item{show.step}{Number of epochs to train non-stop until the training function is allow to report.} \item{prob}{Vector with the probabilities of each sample so as to apply resampling training.} } \value{This function returns a list with two elements: the trained Neural Network object with weights and biases adjusted by the adaptative backpropagation with momentum method and a matrix with the errors obtained during the training. If the validation set is provided, the early stopping technique is applied.} \author{ Manuel Castejón Limas. \email{manuel.castejon@unileon.es}\cr Joaquin Ordieres Meré \email{j.ordieres@upm.es}\cr Ana González Marcos. \email{ana.gonzalez@unirioja.es} \cr Alpha V. Pernía Espinoza. \email{alpha.pernia@unirioja.es}\cr Francisco Javier Martinez de Pisón. \email{fjmartin@unirioja.es}\cr Fernando Alba Elías. \email{fernando.alba@unavarra.es}\cr } \references{ Pernía Espinoza, A.V., Ordieres Meré, J.B., Martínez de Pisón, F.J., González Marcos, A. TAO-robust backpropagation learning algorithm. Neural Networks. Vol. 18, Issue 2, pp. 191--204, 2005.\cr \cr Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr } \seealso{ \code{\link{newff}} } \keyword{neural} AMORE/man/training.report.Rd0000644000175100001440000000240411352510403015314 0ustar hornikusers\name{training.report} \alias{training.report} \title{Neural network training report generator function.} \description{Function in charge of reporting the behavior of the network training. The users should modify this function according to their needs.} \usage{ training.report(net,P,T, idx.show, error.criterium) } \arguments{ \item{net}{Neural Network to train.} \item{P}{Training set input values.} \item{T}{Training set output values} \item{idx.show}{Current show index.} \item{error.criterium}{Criterium used to measure the goodness of fit.} } \value{This function does not return any value. Just useful for printing and plotting.} \author{ Manuel Castejón Limas. \email{manuel.castejon@unileon.es}\cr Joaquin Ordieres Meré. \email{j.ordieres@upm.es}\cr Ana González Marcos. \email{ana.gonzalez@unirioja.es} \cr Alpha V. Pernía Espinoza. 
\email{alpha.pernia@unirioja.es}\cr Francisco Javier Martinez de Pisón. \email{fjmartin@unirioja.es}\cr Fernando Alba Elías. \email{fernando.alba@unavarra.es}\cr } \references{ Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr } \seealso{ \code{\link{train}} } \keyword{neural} AMORE/NAMESPACE0000755000175100001440000000033210400013252012336 0ustar hornikusersuseDynLib(AMORE) export(newff,train,sim,sim.MLPnet,ADAPTgd.MLPnet,ADAPTgdwm.MLPnet,BATCHgd.MLPnet,BATCHgdwm.MLPnet,error.LMS,error.LMLS,error.TAO,deltaE.TAO,hfun,phifun,dphifun,graphviz.MLPnet) S3method(sim, MLPnet) AMORE/R/0000755000175100001440000000000011352605073011336 5ustar hornikusersAMORE/R/deltaE.R0000755000175100001440000001125710372726064012675 0ustar hornikusers############################################################################### hfun <- function(v, k) { result <- array(NA, dim=dim(v)) smallers <- abs(v) < k result[smallers] <- (v[smallers]^2)/2 * (1 - v[smallers]^2 / k^2 + v[smallers]^4 / (3*k^4) ) result[!smallers] <- k^2 / 6 return (result) } ############################################################################### phifun <- function (v, k) { result <- array(NA, dim=dim(v)) smallers <- abs(v) < k result[smallers] <- v[smallers] * ( 1-( v[smallers]^2 / k^2) )^2 result[!smallers] <- 0 return (result) } ############################################################################### dphifun <- function(v,k) { result <- array(NA, dim=dim(v)) smallers <- abs(v) < k result[smallers] <- (sqrt(1-(v[smallers]^2/k^2))) - ( (v[smallers]^2/k^2) * (1-(v[smallers]^2/k^2))) result[!smallers] <- 0 return (result) } ############################################################################### # DELTA ERROR TAO ############################################################################### deltaE.TAO <- function (arguments) { prediction <- arguments[[1]] target <- arguments[[2]] Stao <- arguments[[3]]$deltaE$Stao # the third argument is the net. 
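   # The lines that follow compute the robust TAO influence value: the residuals are
   # first scaled by the current Stao estimate, and c1/c2 are the threshold constants
   # passed to the auxiliary functions hfun and phifun (documented in taofun.Rd,
   # following Pernía Espinoza et al., 2005).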
residual <- prediction - target scaled.residual <- residual / Stao c1 <- 1.56 c2 <- 6.08 bf <- c1^2 / 12 h1 <- hfun(scaled.residual, c1) h2 <- hfun(scaled.residual, c2) phi1 <- phifun(scaled.residual,c1) phi2 <- phifun(scaled.residual,c2) if (sum(phi1 * residual) == 0.0) { dS2e <- 0.0 } else { dS2e <- Stao * (sum(phi1) / (sum(phi1*residual))) } result <-mean(2*Stao*dS2e*h2 + phi2*(Stao - dS2e * residual)) return(result) } ############################################################################### # DELTA ERROR LMS ############################################################################### deltaE.LMS <- function(arguments) { prediction <- arguments[[1]] # arg1 is the prediction target <- arguments[[2]] # arg2 is the target residual <- prediction - target return(residual) } ############################################################################### # DELTA ERROR LMLS ############################################################################### deltaE.LMLS <- function(arguments) { prediction <- arguments[[1]] # arg1 is the prediction target <- arguments[[2]] # arg2 is the target residual <- prediction - target result <- residual / (1 + residual^2 / 2) return(result) } ############################################################################### # ERROR LMS ############################################################################### error.LMS <- function(arguments) { prediction <- arguments[[1]] # arg1 is the prediction target <- arguments[[2]] # arg2 is the target residual <- prediction - target result <- mean((prediction - target)^2) return(result) } ############################################################################### # ERROR LMLS ############################################################################### error.LMLS <- function(arguments) { prediction <- arguments[[1]] # arg1 is the prediction target <- arguments[[2]] # arg2 is the target residual <- prediction - target result <- mean(log(1 + residual^2 / 2)) return(result) } ############################################################################### # ERROR TAO ############################################################################### error.TAO <- function(arguments) { prediction <- arguments[[1]] # arg1 is the prediction target <- arguments[[2]] # arg2 is the target Stao <- arguments[[3]]$deltaE$Stao # arg3 is net residual <- prediction - target n.residual <- nrow(residual) perf <- NA scaled.residual <- residual / Stao c1 <- 1.56 c2 <- 6.08 bf <- c1^2 / 12 h1 <- hfun(scaled.residual, c1) h2 <- hfun(scaled.residual, c2) new.Stao <- Stao*sqrt(sum(h1)/(n.residual * bf)) # n.residuals o n.residuals*n.output.MLPneurons ?? tao.error.squared <- new.Stao^2 * mean(h2) return(list(perf=tao.error.squared, Stao=new.Stao)) } ############################################################################### AMORE/R/graphviz.R0000644000175100001440000000456610606226075013330 0ustar hornikusersgraphviz.MLPnet <- function(net,filename,digits=8) { if (class(net)!="MLPnet") { stop("Your net parameter does not belong to the MLPnet class. Are you aware that the result from the train function is now a list instead of a net? 
Check parameters and try again"); } cat(file=filename," digraph AMOREnet { \n",append=FALSE); cat(file=filename,"rankdir=LR; \n",append=TRUE); cat(file=filename,"ordering=out; \n",append=TRUE); cat(file=filename,"ranksep=2; \n",append=TRUE); cat(file=filename,"nodesep=1; \n",append=TRUE); for (i in 1:length(net$layers[[1]])) { cat(file=filename,"node [shape = hexagon, color=\"green\"] ", paste("\"Input ",i,"\"",sep=""),";\n",append=TRUE); } for (ind.neuron in 1:length(net$neurons)) { neuron <- net$neuron[[ind.neuron]] ; cat(file=filename,"node [shape = record, color=\"blue\"] ",append=TRUE); cat(file=filename,neuron$id,"[label = \"{ Id=\\N | { ",append=TRUE); for ( ind.weight in 1:length(neuron$weights) ) { if (neuron$input.links[ind.weight] < 0 ) { cat(file=filename,"wi",-neuron$input.links[ind.weight],": ",round(neuron$weights[ind.weight],digits),"|",sep="",append=TRUE); } else { cat(file=filename,"w",neuron$input.link[ind.weight],": ",round(neuron$weights[ind.weight],digits),"|",sep="",append=TRUE); } } cat(file=filename,"Bias:",round(neuron$bias,digits),"}|",neuron$activation.function,"|"," v0:", round(neuron$v0,digits),"} \" ];\n",append=TRUE) } for (i in 1:length(net$layers[[length(net$layers)]])) { cat(file=filename,"node [shape = hexagon, color=\"red\"] ", paste("\"Output ",i,"\"",sep=""),";\n",append=TRUE); } for (ind.neuron in 1:length(net$neurons)) { neuron <- net$neurons[[ind.neuron]]; for ( ind.weight in 1:length(neuron$weights)) { if (neuron$input.links[ind.weight] < 0 ) { cat(file=filename,"\"Input ",-neuron$input.links[ind.weight],"\" -> ",neuron$id," ;\n", sep="",append=TRUE); } else { cat(file=filename,neuron$input.links[ind.weight]," -> ",neuron$id," ;\n", sep="",append=TRUE); } } if (neuron$type=="output") { cat(file=filename,neuron$id," -> \"Output ",neuron$output.aims,"\" ;\n", sep="",append=TRUE); } } cat(file=filename,"}\n",append=TRUE); } AMORE/R/newff.R0000755000175100001440000002407710462153036012601 0ustar hornikusers################################# # Creates a new MLPnet object ############################# # OJO : FALTA completar la entrada de un deltae custom para aceptar como parametro la función custom newff <- function (n.neurons, learning.rate.global, momentum.global=NA, error.criterium="LMS", Stao=NA, hidden.layer="tansig", output.layer="purelin", method="ADAPTgdwm") { net <- list( layers=list(), neurons=list(), input=as.double(numeric(n.neurons[1])), output=as.double(numeric(n.neurons[length(n.neurons)])), target=as.double(numeric(n.neurons[length(n.neurons)])), deltaE=list(fname=as.integer(0),f=function(){},Stao=as.double(NA)), other.elements=list() ) if (length(n.neurons)<3) { stop("You should enter a vector containing the number of input neurons, the number of neurons of each hidden layer and the number of outputs.") } possible.activation.functions <- c("custom","tansig","sigmoid","purelin","hardlim") if ( is.na(hidden.activation.function.choice <- pmatch(hidden.layer,possible.activation.functions)) ) { stop("You should use a correct activation function for the hidden layers.") } if ( is.na(output.activation.function.choice <- pmatch(output.layer,possible.activation.functions)) ) { stop("You should use a correct activation function for the output layer.") } possible.methods <- c("ADAPTgdwm","ADAPTgd","BATCHgd","BATCHgdwm") if ( is.na(method.choice <- pmatch(method,possible.methods)) ) { stop("You should use a correct training method: ADAPTgdwm, ADAPTgd, BATCHgdwm, BATCHgd. 
Read the help files.") } layers.last.neuron <- cumsum(n.neurons) layers.first.neuron <- c(1,1+layers.last.neuron[1:(length(layers.last.neuron)-1)])[-c(1)]-n.neurons[1] layers.last.neuron <- layers.last.neuron[-c(1)]-n.neurons[1] net$layers[[1]] <- -c(1:n.neurons[1]) for ( ind.layer in 1:length(layers.last.neuron) ) { net$layers[[ind.layer+1]] <- layers.first.neuron[ind.layer]:layers.last.neuron[ind.layer] } input.links <- net$layers[1:(length(net$layers)-1)] output.links <- list() for ( ind.layer in 2:length(layers.last.neuron)) { output.links[[ind.layer-1]] <- layers.first.neuron[ind.layer]:layers.last.neuron[ind.layer] } output.links[[length(layers.last.neuron)]] <- NA for (ind.layer in 2:length(n.neurons)) { if (ind.layer == length(n.neurons)) { this.neuron.type="output" this.neuron.activation.function.choice <- output.activation.function.choice } else { this.neuron.type="hidden" this.neuron.activation.function.choice <- hidden.activation.function.choice } if (method == "ADAPTgd" ) { method.dep.variables <- list() method.dep.variables$delta <- as.double(0) method.dep.variables$learning.rate <- as.double(learning.rate.global) } else if (method == "ADAPTgdwm") { method.dep.variables <- list() method.dep.variables$delta <- as.double(0) method.dep.variables$learning.rate <- as.double(learning.rate.global) method.dep.variables$momentum <- as.double(momentum.global) method.dep.variables$former.weight.change <- as.double(numeric(n.neurons[ind.layer-1])) method.dep.variables$former.bias.change <- as.double(0) } else if (method == "BATCHgd" ) { method.dep.variables <- list() method.dep.variables$delta <- as.double(0) method.dep.variables$learning.rate <- as.double(learning.rate.global) method.dep.variables$sum.delta.x <- as.double(numeric(n.neurons[ind.layer-1])) method.dep.variables$sum.delta.bias <- as.double(0) } else if (method == "BATCHgdwm") { method.dep.variables <- list() method.dep.variables$delta <- as.double(0) method.dep.variables$learning.rate <- as.double(learning.rate.global) method.dep.variables$sum.delta.x <- as.double(numeric(n.neurons[ind.layer-1])) method.dep.variables$sum.delta.bias <- as.double(0) method.dep.variables$momentum <- as.double(momentum.global) method.dep.variables$former.weight.change <- as.double(numeric(n.neurons[ind.layer-1])) method.dep.variables$former.bias.change <- as.double(0) } for ( ind.MLPneuron.relative in 1:length(net$layers[[ind.layer]]) ) { ind.MLPneuron <- net$layers[[ind.layer]][[ind.MLPneuron.relative]] net$neurons[[ind.MLPneuron]] <- init.MLPneuron(id=ind.MLPneuron,type=this.neuron.type, activation.function=as.integer(this.neuron.activation.function.choice-1),output.links=output.links[[ind.layer-1]], output.aims=rep(ind.MLPneuron.relative,length(output.links[[ind.layer-1]])), input.links=input.links[[ind.layer-1]],weights=numeric(n.neurons[ind.layer-1]), bias=0, method, method.dep.variables ) } } if (error.criterium == "LMS" ) { net$deltaE$fname <- as.integer(0) # LMS_NAME 0 net$deltaE$f <- deltaE.LMS } else if (error.criterium == "LMLS") { net$deltaE$fname <- as.integer(1) # LMLS_NAME 1 net$deltaE$f <- deltaE.LMLS } else if (error.criterium == "TAO") { net$deltaE$fname <- as.integer(2) # TAO_NAME 2 net$deltaE$f <- deltaE.TAO if (missing(Stao)){ stop("You should enter the Stao value") } else { net$deltaE$Stao <-as.double(Stao) } } else { stop("You should enter either: \"LMS\", \"LMSL\" or \"TAO\". 
") } class(net) <- "MLPnet" net <- random.init.MLPnet(net) return(net) } ################################# # Creates individual neurons ######################### init.MLPneuron <- function(id,type,activation.function,output.links, output.aims, input.links, weights, bias, method, method.dep.variables) { aux <- select.activation.function(activation.function) neuron <- list() neuron$id <- as.integer(id) neuron$type <- as.character(type) neuron$activation.function <- activation.function neuron$output.links <- as.integer(output.links) neuron$output.aims <- as.integer(output.aims) neuron$input.links <- as.integer(input.links) neuron$weights <- as.double(weights) neuron$bias <- as.double(bias) neuron$v0 <- as.double(0) neuron$v1 <- as.double(0) neuron$f0 <- aux$f0 neuron$f1 <- aux$f1 neuron$method <- as.character(method) neuron$method.dep.variables <- method.dep.variables class(neuron) <- "neuron" return(neuron) } ######################################### # Initialize the neuron bias and weights with random values according to the book: # Neural Networks. A comprehensive foundation. 2nd Edition. # Author: Simon Haykin. # pages = 182, 183, 184. ################################# random.init.MLPneuron <- function(net.number.weights, neuron) { extreme <- sqrt(3/net.number.weights) n.weights <- length(neuron$weights) neuron$weights <- runif(n.weights,min=-extreme,max=extreme) neuron$bias <- runif(1,min=-extreme,max=extreme) return(neuron) } ################################################# # Runs random.init.MLPneuron upon each neuron. ########################################### random.init.MLPnet <- function(net) { net.number.weights <- length(net$neurons) #number of bias terms for (ind.MLPneuron in 1:length(net$neurons)) { net.number.weights <- net.number.weights + length(net$neurons[[ind.MLPneuron]]$weights) } for ( i in 1:length(net$neurons)) { net$neurons[[i]] <- random.init.MLPneuron(net.number.weights,net$neurons[[i]] ) } return(net) } ######################################### # A simple function to bestow the neuron with the appropriate select.activation.function <- function(activation.function) { f0 <- NA f1 <- NA # a.tansig : 1/tanh(2/3) # b.tansig : 2/3 # a.sigmoid : 1.0 if (activation.function == 1 ) { # TANSIG f0 <- function (v) { a.tansig <- 1.715904708575539 b.tansig <- 0.6666666666666667 return ( a.tansig * tanh( v * b.tansig ) ) } f1 <- function (v) { # realmente usaremos f1= b.tansig/a.tansig*(a.tansig-f0)*(a.tansig+f0) a.tansig <- 1.715904708575539 b.tansig <- 0.6666666666666667 return( a.tansig * b.tansig * (1-tanh( v * b.tansig )^2) ) } } else if (activation.function == 2 ) { # SIGMOID f0 <- function (v) { a.sigmoid <- 1 return( 1/(1+exp(- a.sigmoid * v)) ) } f1 <- function (v) { # realmente usaremos f1=a.sigmoid*f0*(1-f0) a.sigmoid <- 1 return ( a.sigmoid * exp(- a.sigmoid * v) / (1+exp(- a.sigmoid * v))^2 ) } } else if (activation.function == 3 ) { # PURELIN f0 <- function (v) { return( v ) } f1 <- function (v) { return( 1 ) } } else if (activation.function == 4 ) { # HARDLIM f0 <- function (v) { if (v>=0) { return(1) } else { return(0) } } f1 <- function (v) { return ( NA ) } } return(list(f0=f0,f1=f1)) } ############################################################## # Manually set the learning rate and momentum for each neuron ############################################################## # deprecated. do not use. 
does not work #set.learning.rate.and.momentum <- function(net, learning.rate, momentum) { # for (i in 1:length(net$neurons)) { # net$neurons[[i]]$learning.rate <- learning.rate # net$neurons[[i]]$momentum <- momentum # } # return(net) #} # # AMORE/R/sim.R0000755000175100001440000001757610400011114012250 0ustar hornikusers################################################## sim <-function (net,P,...) { UseMethod("sim") } ################################################## sim.MLPnet <- function(net,P,...) { if (class(net)!="MLPnet") { stop("Your net parameter does not belong to the MLPnet class. Are you aware that the result from the train function is now a list instead of a net? Check parameters and try again"); } P <- as.matrix(P) ytrans <- matrix(0, nrow=length(net$layer[[length(net$layer)]]), ncol=nrow(P)) ytrans <- .Call("sim_Forward_MLPnet", net, t(P), ytrans, .GlobalEnv, PACKAGE="AMORE") return(t(ytrans)) } ############################################################################################### train <- function(net, P, T, Pval=NULL, Tval=NULL, error.criterium="LMS", report=TRUE, n.shows, show.step, Stao=NA, prob=NULL) { if (class(net)!="MLPnet") { stop("Your net parameter does not belong to the MLPnet class. Are you aware that the result from the train function is now a list instead of a net? Check parameters and try again"); } P <- as.matrix(P) T <- as.matrix(T) epoch.show.step <- 0 n.muestras <- nrow(P) net$deltaE$fname <- as.integer(0) # custom case if(error.criterium=="LMS") { net$deltaE$fname <- as.integer(1) net$deltaE$f <- deltaE.LMS } else if(error.criterium=="LMLS") { net$deltaE$fname <- as.integer(2) net$deltaE$f <- deltaE.LMLS } else if(error.criterium=="TAO") { if (missing(Stao)) { stop("You should enter the value of Stao") } else { net$deltaE$fname <- as.integer(3) net$deltaE$f <- deltaE.TAO net$deltaE$Stao <- Stao } } method <- net$neurons[[1]]$method if (method =="ADAPTgd") { train.method <- ADAPTgd.MLPnet } else if (method =="ADAPTgdwm") { train.method <- ADAPTgdwm.MLPnet } else if (method =="BATCHgd") { train.method <- BATCHgd.MLPnet } else if (method =="BATCHgdwm") { train.method <- BATCHgdwm.MLPnet } if (is.null(prob)) { if (!is.null(Pval) & !is.null(Tval)) { Merror <- matrix(NA, ncol=2, nrow=n.shows) Pval <- as.matrix(Pval) Tval <- as.matrix(Tval) min.error.val <- Inf bestnet <- net for (idx.show in 1:n.shows) { net <- train.method(net, P, T, show.step) P.sim <- sim.MLPnet(net,P) Pval.sim <- sim.MLPnet(net,Pval) if(error.criterium=="LMS") { error <- error.LMS(list(prediction=P.sim, target=T )) error.val <- error.LMS(list(prediction=Pval.sim, target=Tval )) } else if(error.criterium=="LMLS") { error <- error.LMLS(list(prediction=P.sim, target=T )) error.val <- error.LMLS(list(prediction=Pval.sim, target=Tval )) } else if(error.criterium=="TAO") { error.aux <- error.TAO(list(prediction=P.sim, target=T, net=net)) error <- error.aux$perf new.tao <- error.aux$Stao error.val <- error.TAO(list(prediction=Pval.sim, target=Tval, net=net))$perf cat("Stao:", new.tao, " ") } Merror [idx.show,] <- c(error,error.val) if (error.val <= min.error.val ) { min.error.val <- error.val bestnet <- net cat(paste("index.show:", idx.show, error.criterium,"\tTRAIN:",error,"\tVAL:",error.val,"\t BEST NET\n", sep=" ")) } else { cat(paste("index.show:", idx.show, error.criterium,"\tTRAIN:",error,"\tVAL:",error.val,"\n", sep=" ")) } } net <- bestnet } else { Merror <- matrix(NA, ncol=1, nrow=n.shows) for (idx.show in 1:n.shows) { net <- train.method(net, P, T, show.step) if (report) { 
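            # training.report() prints the current fit; its return list is used below to
            # store the error trace in Merror and, for the TAO criterium, to refresh the
            # Stao value kept in net$other.elements$Stao.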
auxReport <- training.report(net, P, T, idx.show, error.criterium) net$other.elements$Stao <- auxReport$new.tao Merror [idx.show,1] <- auxReport$error } } } } else { if (!is.null(Pval) & !is.null(Tval)) { Merror <- matrix(NA, ncol=2, nrow=n.shows) Pval <- as.matrix(Pval) Tval <- as.matrix(Tval) min.error.val <- Inf bestnet <- net for (idx.show in 1:n.shows) { orden <- sample(1:n.muestras, n.muestras, replace=TRUE , prob=prob) net <- train.method(net, P[orden, , drop=FALSE], T[orden, , drop=FALSE], show.step) P.sim <- sim.MLPnet(net,P) Pval.sim <- sim.MLPnet(net,Pval) if(error.criterium=="LMS") { error <- error.LMS(list(prediction=P.sim, target=T )) error.val <- error.LMS(list(prediction=Pval.sim, target=Tval )) } else if(error.criterium=="LMLS") { error <- error.LMLS(list(prediction=P.sim, target=T )) error.val <- error.LMLS(list(prediction=Pval.sim, target=Tval )) } else if(error.criterium=="TAO") { error.aux <- error.TAO(list(prediction=P.sim, target=T, net=net)) error <- error.aux$perf new.tao <- error.aux$Stao error.val <- error.TAO(list(prediction=Pval.sim, target=Tval, net=net))$perf cat("Stao:", new.tao, " ") } Merror [idx.show,] <- c(error,error.val) if (error.val <= min.error.val ) { min.error.val <- error.val bestnet <- net cat(paste("index.show:", idx.show, error.criterium,"\tTRAIN:",error,"\tVAL:",error.val,"\t BEST NET\n", sep=" ")) } else { cat(paste("index.show:", idx.show, error.criterium,"\tTRAIN:",error,"\tVAL:",error.val,"\n", sep=" ")) } } net <- bestnet } else { Merror <- matrix(NA, ncol=1, nrow=n.shows) for (idx.show in 1:n.shows) { orden <- sample(1:n.muestras, n.muestras, replace=TRUE , prob=prob) net <- train.method(net, P[orden, , drop=FALSE], T[orden, , drop=FALSE], show.step) if (report) { auxReport <- training.report(net, P, T, idx.show, error.criterium) net$other.elements$Stao <- auxReport$new.tao Merror [idx.show,1] <- auxReport$error } } } } return(list(net=net,Merror=Merror)) } ############################################################################################### training.report <- function(net,P,T, idx.show, error.criterium) { ########### BEGIN do not delete ########## if (class(net)!="MLPnet") { stop("Your net parameter does not belong to the MLPnet class. Are you aware that the result from the train function is now a list instead of a net? 
Check parameters and try again"); } new.tao <- NA ########### END do not delete ############ P.sim <- sim.MLPnet(net,P) # par(mfrow=c(1,2)) # plot(P,T, col="red", pch="*", ylim=range(rbind(T,P.sim))) # points(P,P.sim, col="blue", pch="+") # plot(P, ideal, col="red", pch=".", ylim=range(rbind(ideal,P.sim))) # points(P,P.sim, col="blue", pch=".") if(error.criterium=="LMS") { error <- error.LMS(list(prediction=P.sim, target=T)) } else if(error.criterium=="LMLS") { error <- error.LMLS(list(prediction=P.sim, target=T)) ########### BEGIN do not delete (only minor changes allowed) ########## } else if(error.criterium=="TAO") { error.aux <- error.TAO(list(prediction=P.sim, target=T, net=net)) error <- error.aux$perf new.tao <- error.aux$Stao cat("Stao:", new.tao, " ") } ########### END do not delete ############ cat(paste("index.show:", idx.show, error.criterium,error,"\n", sep=" ")) ########### BEGIN do not delete ########## return(list(error=error,new.tao=new.tao)) ########### END do not delete ############ } AMORE/R/trMethods.R0000755000175100001440000000613110400011164013417 0ustar hornikusers########################################################## # Adaptative Gradient Descent (without momentum) ########################################################## ADAPTgd.MLPnet <- function(net, P, T, n.epochs) { if (class(net)!="MLPnet") { stop("Your net parameter does not belong to the MLPnet class. Are you aware that the result from the train function is now a list instead of a net? Check parameters and try again"); } net <- .Call("ADAPTgd_loop_MLPnet", net, t(P), t(T),as.integer(n.epochs), new.env(), PACKAGE="AMORE" ) return(net) } ################################################## ########################################################## # Adaptative Gradient Descent (with momentum) ########################################################## ADAPTgdwm.MLPnet <- function(net,P,T, n.epochs) { if (class(net)!="MLPnet") { stop("Your net parameter does not belong to the MLPnet class. Are you aware that the result from the train function is now a list instead of a net? Check parameters and try again"); } net <- .Call("ADAPTgdwm_loop_MLPnet", net, t(P), t(T), as.integer(n.epochs), new.env(), PACKAGE="AMORE" ) return(net) } ################################################## ############################################################## # BATCHgd ( BATCH gradient descent without momentum ) ############################################################## BATCHgd.MLPnet <- function(net, P, T, n.epochs) { # Each pattern is a row of P, if (class(net)!="MLPnet") { stop("Your net parameter does not belong to the MLPnet class. Are you aware that the result from the train function is now a list instead of a net? 
Check parameters and try again"); } ##### First Step: BATCHgd.Forward.MLPnet for (ind.MLPneuron in 1:length(net$neurons)) { net$neurons[[ind.MLPneuron]]$method.dep.variables$sum.delta.bias <- as.double(0) net$neurons[[ind.MLPneuron]]$method.dep.variables$sum.delta.x <- as.double(numeric(length(net$neurons[[ind.MLPneuron]]$method.dep.variables$sum.delta.x))) } net <- .Call("BATCHgd_loop_MLPnet", net, t(P), t(T), as.integer(n.epochs), new.env(), PACKAGE="AMORE") return(net) } ############################################################## # BATCHgdwm ( BATCH gradient descent with momentum ) ############################################################## BATCHgdwm.MLPnet <- function(net, P, T, n.epochs) { # Each pattern is a row of P, if (class(net)!="MLPnet") { stop("Your net parameter does not belong to the MLPnet class. Are you aware that the result from the train function is now a list instead of a net? Check parameters and try again"); } ##### First step: BATCHgdwm.Forward.MLPnet for (ind.MLPneuron in 1:length(net$neurons)) { net$neurons[[ind.MLPneuron]]$method.dep.variables$sum.delta.bias <- as.double(0) net$neurons[[ind.MLPneuron]]$method.dep.variables$sum.delta.x <- as.double(numeric(length(net$neurons[[ind.MLPneuron]]$method.dep.variables$sum.delta.x))) } net <- .Call("BATCHgdwm_loop_MLPnet", net, t(P), t(T), as.integer(n.epochs), new.env(), PACKAGE="AMORE") return(net) } ####### AMORE/src/0000755000175100001440000000000011352512213011715 5ustar hornikusersAMORE/src/ADAPTgd.c0000755000175100001440000001554010372726064013251 0ustar hornikusers #include #include #include #include #include #include #include "AMORE.h" /** ########################################################## # Adaptative Gradient Descent (without momentum) ########################################################## **/ SEXP ADAPTgd_loop_MLPnet (SEXP origNet, SEXP Ptrans, SEXP Ttrans, SEXP nepochs, SEXP rho) { int * Ptransdim, *Ttransdim, fila, columna, Pcounter, Tcounter; int considered_input, ind_neuron, ind_other_neuron, that_neuron, that_aim, ind_weight; double aux_DELTA, x_input, a, bias_change, weight_change; int epoch, n_epochs; SEXP R_fcall, args, arg1, arg2, arg3; SEXP aims, net; struct AMOREneuron * ptneuron, * pt_that_neuron; struct AMOREnet * ptnet; double aux1, aux2; PROTECT(net=duplicate(origNet)); Ptransdim = INTEGER(coerceVector(getAttrib(Ptrans, R_DimSymbol), INTSXP)); Ttransdim = INTEGER(coerceVector(getAttrib(Ttrans, R_DimSymbol), INTSXP)); n_epochs = INTEGER(nepochs)[0]; ptnet = copynet_RC(net); for (epoch=0; epoch < n_epochs; epoch++) { for (fila=0, Pcounter=0, Tcounter=0; fila < Ptransdim[1]; fila++) { for( columna =0; columna < Ptransdim[0] ; columna++, Pcounter++) { ptnet->input[columna] = REAL(Ptrans)[Pcounter]; } for( columna =0; columna < Ttransdim[0] ; columna++, Tcounter++) { ptnet->target[columna] = REAL(Ttrans)[Tcounter]; } /* BEGIN void adaptgd_forward_mlpnet(AMOREnet * ptnet) */ for (ind_neuron=0; ind_neuron <= ptnet->last_neuron ; ind_neuron++ ) { ptneuron = ptnet->neurons[ind_neuron]; /* BEGIN adaptgd_forward_MLPneuron */ for (a=0.0, ind_weight=0; ind_weight <= ptneuron->last_input_link; ind_weight++) { considered_input = ptneuron->input_links[ind_weight]; if (considered_input < 0 ) { x_input = ptnet->input[-1-considered_input]; } else { x_input = ptnet->neurons[-1+considered_input]->v0; } a += ptneuron->weights[ind_weight] * x_input; } a += ptneuron->bias; switch (ptneuron->actf) { case TANSIG_ACTF: ptneuron->v0 = a_tansig * tanh(a * b_tansig); ptneuron->v1 = b_tansig / a_tansig * 
(a_tansig - ptneuron->v0)*(a_tansig + ptneuron->v0); break; case SIGMOID_ACTF: ptneuron->v0 = 1/(1+exp(- a_sigmoid * a)) ; ptneuron->v1 = a_sigmoid * ptneuron->v0 * ( 1 - ptneuron->v0 ); break; case PURELIN_ACTF: ptneuron->v0 = a; ptneuron->v1 = 1; break; case HARDLIM_ACTF: if (a>=0) { ptneuron->v0 = 1.0; } else { ptneuron->v0 = 0.0; } ptneuron->v1 = NA_REAL; break; case CUSTOM_ACTF: PROTECT(args = allocVector(REALSXP,1)); REAL(args)[0] = a; PROTECT(R_fcall = lang2(VECTOR_ELT(VECTOR_ELT(NET_NEURONS, ind_neuron), id_F0), args)); ptneuron->v0 = REAL(eval (R_fcall, rho))[0]; PROTECT(args = allocVector(REALSXP,1)); REAL(args)[0] = a; PROTECT(R_fcall = lang2(VECTOR_ELT(VECTOR_ELT(NET_NEURONS, ind_neuron), id_F1), args)); ptneuron->v1 = REAL(eval (R_fcall, rho))[0]; UNPROTECT(4); break; } /* END adaptgd_forward_MLPneuron */ } /* END void adaptgd_forward_mlpnet(AMOREnet * ptnet) */ /* BEGIN void adaptgd_backwards_MLPnet (AMOREnet * ptnet, SEXP rho) */ for ( ind_neuron=ptnet->last_neuron; ind_neuron >=0; ind_neuron-- ) { ptneuron=ptnet->neurons[ind_neuron]; /**/ if (ptneuron->type==TYPE_OUTPUT) { switch(ptnet->deltaE.name) { case LMS_NAME: aux_DELTA = ptneuron->v0 - ptnet->target[-1+ptneuron->output_aims[0]]; break; case LMLS_NAME: aux_DELTA = ptneuron->v0- ptnet->target[-1+ptneuron->output_aims[0]]; aux_DELTA = aux_DELTA / (1 + aux_DELTA*aux_DELTA / 2); break; default: /* if (ptneuron->deltaE.name==TAO_NAME) de momento tao es como custom*/ /* ####### OJO FALTA cambiar el TAO */ PROTECT(args = allocVector(VECSXP,3) ); PROTECT(arg3 = net ); PROTECT(arg2 = allocVector(REALSXP,1) ); PROTECT(arg1 = allocVector(REALSXP,1) ); REAL(arg1)[0] = ptneuron->v0; REAL(arg2)[0] = ptnet->target[-1+ptneuron->output_aims[0]]; SET_VECTOR_ELT(args, 0, arg1); SET_VECTOR_ELT(args, 1, arg2); SET_VECTOR_ELT(args, 2, arg3); PROTECT(R_fcall = lang2(DELTAE_F, args) ); aux_DELTA = REAL(eval (R_fcall, rho))[0]; UNPROTECT(5); break; }; } else { aux_DELTA = 0.0; for ( ind_other_neuron=0; ind_other_neuron <= ptneuron->last_output_link ; ind_other_neuron++ ) { pt_that_neuron = ptneuron->output_links[ind_other_neuron]; that_aim = -1+ptneuron->output_aims[ind_other_neuron]; aux_DELTA += pt_that_neuron->method_dep_variables.adaptgd.delta * pt_that_neuron->weights[that_aim] ; } } ptneuron->method_dep_variables.adaptgd.delta = aux_DELTA * ptneuron->v1; bias_change = - ptneuron->method_dep_variables.adaptgd.learning_rate * ptneuron->method_dep_variables.adaptgd.delta; ptneuron->bias += bias_change; for (ind_weight = 0; ind_weight <= ptneuron->last_input_link; ind_weight++) { considered_input = ptneuron->input_links[ind_weight]; if (considered_input < 0 ) { x_input = ptnet->input[-1-considered_input]; } else { x_input = ptnet->neurons[-1+considered_input]->v0; } weight_change = - ptneuron->method_dep_variables.adaptgd.learning_rate * ptneuron->method_dep_variables.adaptgd.delta * x_input ; ptneuron->weights[ind_weight] += weight_change; } /**/ } /* END void adaptgd_backwards_MLPnet (AMOREnet * ptnet, SEXP rho) */ } } copynet_CR (net, ptnet); UNPROTECT(1); return (net); } AMORE/src/ADAPTgdwm.c0000755000175100001440000001646110372726064013620 0ustar hornikusers #include #include #include #include #include #include #include "AMORE.h" /** ########################################################## # Adaptative Gradient Descent (with momentum) ########################################################## */ SEXP ADAPTgdwm_loop_MLPnet (SEXP origNet, SEXP Ptrans, SEXP Ttrans, SEXP nepochs, SEXP rho) { int * Ptransdim, *Ttransdim, fila, columna, 
Pcounter, Tcounter; int considered_input, ind_neuron, ind_other_neuron, that_neuron, that_aim, ind_weight; double aux_DELTA, x_input, a, bias_change, weight_change; int epoch, n_epochs; SEXP R_fcall, args, arg1, arg2, arg3; SEXP aims, net; struct AMOREneuron * ptneuron, * pt_that_neuron; struct AMOREnet * ptnet; double aux1, aux2; PROTECT(net=duplicate(origNet)); Ptransdim = INTEGER(coerceVector(getAttrib(Ptrans, R_DimSymbol), INTSXP)); Ttransdim = INTEGER(coerceVector(getAttrib(Ttrans, R_DimSymbol), INTSXP)); n_epochs = INTEGER(nepochs)[0]; ptnet = copynet_RC(net); for (epoch=0; epoch < n_epochs; epoch++) { for (fila=0, Pcounter=0, Tcounter=0; fila < Ptransdim[1]; fila++) { for( columna =0; columna < Ptransdim[0] ; columna++, Pcounter++) { ptnet->input[columna] = REAL(Ptrans)[Pcounter]; } for( columna =0; columna < Ttransdim[0] ; columna++, Tcounter++) { ptnet->target[columna] = REAL(Ttrans)[Tcounter]; } /** BEGIN void adaptgdwm_forward_mlpnet(AMOREnet * ptnet) */ for (ind_neuron=0; ind_neuron <= ptnet->last_neuron ; ind_neuron++ ) { ptneuron = ptnet->neurons[ind_neuron]; /* BEGIN adaptgdwm_forward_MLPneuron */ for (a=0.0, ind_weight=0; ind_weight <= ptneuron->last_input_link; ind_weight++) { considered_input = ptneuron->input_links[ind_weight]; if (considered_input < 0 ) { x_input = ptnet->input[-1-considered_input]; } else { x_input = ptnet->neurons[-1+considered_input]->v0; } a += ptneuron->weights[ind_weight] * x_input; } a += ptneuron->bias; switch (ptneuron->actf) { case TANSIG_ACTF: ptneuron->v0 = a_tansig * tanh(a * b_tansig); ptneuron->v1 = b_tansig / a_tansig * (a_tansig - ptneuron->v0)*(a_tansig + ptneuron->v0); break; case SIGMOID_ACTF: ptneuron->v0 = 1/(1+exp(- a_sigmoid * a)) ; ptneuron->v1 = a_sigmoid * ptneuron->v0 * ( 1 - ptneuron->v0 ); break; case PURELIN_ACTF: ptneuron->v0 = a; ptneuron->v1 = 1; break; case HARDLIM_ACTF: if (a>=0) { ptneuron->v0 = 1.0; } else { ptneuron->v0 = 0.0; } ptneuron->v1 = NA_REAL; break; case CUSTOM_ACTF: PROTECT(args = allocVector(REALSXP,1)); REAL(args)[0] = a; PROTECT(R_fcall = lang2(VECTOR_ELT(VECTOR_ELT(NET_NEURONS, ind_neuron), id_F0), args)); ptneuron->v0 = REAL(eval (R_fcall, rho))[0]; PROTECT(args = allocVector(REALSXP,1)); REAL(args)[0] = a; PROTECT(R_fcall = lang2(VECTOR_ELT(VECTOR_ELT(NET_NEURONS, ind_neuron), id_F1), args)); ptneuron->v1 = REAL(eval (R_fcall, rho))[0]; UNPROTECT(4); break; } /* END adaptgdwm_forward_MLPneuron */ } /** END void adaptgdwm_forward_mlpnet(AMOREnet * ptnet) */ /* BEGIN void adaptgdwm_backwards_MLPnet (AMOREnet * ptnet, SEXP rho) */ for ( ind_neuron=ptnet->last_neuron; ind_neuron >=0; ind_neuron-- ) { ptneuron=ptnet->neurons[ind_neuron]; /**/ if (ptneuron->type==TYPE_OUTPUT) { switch(ptnet->deltaE.name) { case LMS_NAME: aux_DELTA = ptneuron->v0 - ptnet->target[-1+ptneuron->output_aims[0]]; break; case LMLS_NAME: aux_DELTA = ptneuron->v0- ptnet->target[-1+ptneuron->output_aims[0]]; aux_DELTA = aux_DELTA / (1 + aux_DELTA*aux_DELTA / 2); break; default: /** if (ptneuron->deltaE.name==TAO_NAME) de momento tao es como custom*/ /** ####### OJO FALTA cambiar el TAO */ PROTECT(args = allocVector(VECSXP,3) ); PROTECT(arg3 = net ); PROTECT(arg2 = allocVector(REALSXP,1) ); PROTECT(arg1 = allocVector(REALSXP,1) ); REAL(arg1)[0] = ptneuron->v0; REAL(arg2)[0] = ptnet->target[-1+ptneuron->output_aims[0]]; SET_VECTOR_ELT(args, 0, arg1); SET_VECTOR_ELT(args, 1, arg2); SET_VECTOR_ELT(args, 2, arg3); PROTECT(R_fcall = lang2(DELTAE_F, args) ); aux_DELTA = REAL(eval (R_fcall, rho))[0]; UNPROTECT(5); break; }; } else { 
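/* hidden neuron: accumulate the back-propagated delta contributed by every downstream neuron, each weighted by the connection weight linking this neuron to it */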
aux_DELTA = 0.0; for ( ind_other_neuron=0; ind_other_neuron <= ptneuron->last_output_link ; ind_other_neuron++ ) { pt_that_neuron = ptneuron->output_links[ind_other_neuron]; that_aim = -1+ptneuron->output_aims[ind_other_neuron]; aux_DELTA += pt_that_neuron->method_dep_variables.adaptgdwm.delta * pt_that_neuron->weights[that_aim] ; } } ptneuron->method_dep_variables.adaptgdwm.delta = aux_DELTA * ptneuron->v1; bias_change = ptneuron->method_dep_variables.adaptgdwm.momentum * ptneuron->method_dep_variables.adaptgdwm.former_bias_change - ptneuron->method_dep_variables.adaptgdwm.learning_rate * ptneuron->method_dep_variables.adaptgdwm.delta; ptneuron->bias += bias_change; ptneuron->method_dep_variables.adaptgdwm.former_bias_change = bias_change; for (ind_weight = 0; ind_weight <= ptneuron->last_input_link; ind_weight++) { considered_input = ptneuron->input_links[ind_weight]; if (considered_input < 0 ) { x_input = ptnet->input[-1-considered_input]; } else { x_input = ptnet->neurons[-1+considered_input]->v0; } weight_change = ptneuron->method_dep_variables.adaptgdwm.momentum * ptneuron->method_dep_variables.adaptgdwm.former_weight_change[ind_weight] - ptneuron->method_dep_variables.adaptgdwm.learning_rate * ptneuron->method_dep_variables.adaptgdwm.delta * x_input ; ptneuron->weights[ind_weight] += weight_change; ptneuron->method_dep_variables.adaptgdwm.former_weight_change[ind_weight] = weight_change; } /**/ } /* END void adaptgdwm_backwards_MLPnet (AMOREnet * ptnet, SEXP rho) */ } } copynet_CR (net, ptnet); UNPROTECT(1); return (net); } AMORE/src/AMORE.h0000755000175100001440000001666110372726064012756 0ustar hornikusers#define a_tansig 1.715904708575539 #define b_tansig 0.6666666666666667 #define b_split_a 0.3885219635652736 #define a_sigmoid 1.0 /* ************************************************ */ /* net elements */ #define id_NET_LAYERS 0 #define id_NET_NEURONS 1 #define id_NET_INPUT 2 #define id_NET_OUTPUT 3 #define id_NET_TARGET 4 #define id_NET_DELTAE 5 #define id_NET_OTHER_ELEMENTS 4 /**/ #define NET_LAYERS VECTOR_ELT(net,id_NET_LAYERS) #define NET_NEURONS VECTOR_ELT(net,id_NET_NEURONS) #define NET_INPUT VECTOR_ELT(net,id_NET_INPUT) #define NET_OUTPUT VECTOR_ELT(net,id_NET_OUTPUT) #define NET_TARGET VECTOR_ELT(net,id_NET_TARGET) #define NET_DELTAE VECTOR_ELT(net,id_NET_DELTAE) #define NET_OTHER_ELEMENTS VECTOR_ELT(net,id_NET_OTHER_ELEMENTS) /* neuron elements */ #define id_ID 0 #define id_TYPE 1 #define id_ACTIVATION_FUNCTION 2 #define id_OUTPUT_LINKS 3 #define id_OUTPUT_AIMS 4 #define id_INPUT_LINKS 5 #define id_WEIGHTS 6 #define id_BIAS 7 #define id_V0 8 #define id_V1 9 #define id_F0 10 #define id_F1 11 #define id_METHOD 12 #define id_METHOD_DEP_VARIABLES 13 /**/ #define ID VECTOR_ELT(neuron,id_ID) #define TYPE VECTOR_ELT(neuron,id_TYPE) #define ACTIVATION_FUNCTION VECTOR_ELT(neuron,id_ACTIVATION_FUNCTION) #define OUTPUT_LINKS VECTOR_ELT(neuron,id_OUTPUT_LINKS) #define OUTPUT_AIMS VECTOR_ELT(neuron,id_OUTPUT_AIMS) #define INPUT_LINKS VECTOR_ELT(neuron,id_INPUT_LINKS) #define WEIGHTS VECTOR_ELT(neuron,id_WEIGHTS) #define BIAS VECTOR_ELT(neuron,id_BIAS) #define V0 VECTOR_ELT(neuron,id_V0) #define V1 VECTOR_ELT(neuron,id_V1) #define F0 VECTOR_ELT(neuron,id_F0) #define F1 VECTOR_ELT(neuron,id_F1) #define METHOD VECTOR_ELT(neuron,id_METHOD) #define METHOD_DEP_VARIABLES VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES) /* METHOD DEPENDENT VARIABLES */ /* ADAPTgd Adaptative Gradient Descent */ #define id_ADAPTgd_DELTA 0 #define id_ADAPTgd_LEARNING_RATE 1 /**/ #define ADAPTgd_DELTA 
VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_ADAPTgd_DELTA) #define ADAPTgd_LEARNING_RATE VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_ADAPTgd_LEARNING_RATE) /* ADAPTgdwm Adaptative Gradient Descent with Momentum */ #define id_ADAPTgdwm_DELTA 0 #define id_ADAPTgdwm_LEARNING_RATE 1 #define id_ADAPTgdwm_MOMENTUM 2 #define id_ADAPTgdwm_FORMER_WEIGHT_CHANGE 3 #define id_ADAPTgdwm_FORMER_BIAS_CHANGE 4 /**/ #define ADAPTgdwm_DELTA VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_ADAPTgdwm_DELTA) #define ADAPTgdwm_LEARNING_RATE VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_ADAPTgdwm_LEARNING_RATE) #define ADAPTgdwm_MOMENTUM VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_ADAPTgdwm_MOMENTUM) #define ADAPTgdwm_FORMER_WEIGHT_CHANGE VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_ADAPTgdwm_FORMER_WEIGHT_CHANGE) #define ADAPTgdwm_FORMER_BIAS_CHANGE VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_ADAPTgdwm_FORMER_BIAS_CHANGE) /* BATCHgd BATCH Gradient Descent */ #define id_BATCHgd_DELTA 0 #define id_BATCHgd_LEARNING_RATE 1 #define id_BATCHgd_SUM_DELTA_X 2 #define id_BATCHgd_SUM_DELTA_BIAS 3 /**/ #define BATCHgd_DELTA VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgd_DELTA) #define BATCHgd_LEARNING_RATE VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgd_LEARNING_RATE) #define BATCHgd_SUM_DELTA_X VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgd_SUM_DELTA_X) #define BATCHgd_SUM_DELTA_BIAS VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgd_SUM_DELTA_BIAS) /* BATCHgdwm BATCH Gradient Descent with Momentum */ #define id_BATCHgdwm_DELTA 0 #define id_BATCHgdwm_LEARNING_RATE 1 #define id_BATCHgdwm_SUM_DELTA_X 2 #define id_BATCHgdwm_SUM_DELTA_BIAS 3 #define id_BATCHgdwm_MOMENTUM 4 #define id_BATCHgdwm_FORMER_WEIGHT_CHANGE 5 #define id_BATCHgdwm_FORMER_BIAS_CHANGE 6 /**/ #define BATCHgdwm_DELTA VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgdwm_DELTA) #define BATCHgdwm_LEARNING_RATE VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgdwm_LEARNING_RATE) #define BATCHgdwm_SUM_DELTA_X VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgdwm_SUM_DELTA_X) #define BATCHgdwm_SUM_DELTA_BIAS VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgdwm_SUM_DELTA_BIAS) #define BATCHgdwm_MOMENTUM VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgdwm_MOMENTUM) #define BATCHgdwm_FORMER_WEIGHT_CHANGE VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgdwm_FORMER_WEIGHT_CHANGE) #define BATCHgdwm_FORMER_BIAS_CHANGE VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgdwm_FORMER_BIAS_CHANGE) /* OTHER ELEMENTS */ #define id_DELTAE_NAME 0 #define id_DELTAE_F 1 #define id_DELTAE_STAO 2 #define DELTAE_NAME VECTOR_ELT(VECTOR_ELT(net,id_NET_DELTAE), id_DELTAE_NAME ) #define DELTAE_F VECTOR_ELT(VECTOR_ELT(net,id_NET_DELTAE), id_DELTAE_F ) #define DELTAE_STAO VECTOR_ELT(VECTOR_ELT(net,id_NET_DELTAE), id_DELTAE_STAO ) /**/ #define CUSTOM_NAME 0 #define LMS_NAME 1 #define LMLS_NAME 2 #define TAO_NAME 3 #define CUSTOM_ACTF 0 #define TANSIG_ACTF 1 #define SIGMOID_ACTF 2 #define PURELIN_ACTF 3 #define HARDLIM_ACTF 4 #define TYPE_HIDDEN 0 #define TYPE_OUTPUT 1 #define METHOD_ADAPTgd 0 #define METHOD_ADAPTgdwm 1 #define METHOD_BATCHgd 2 #define METHOD_BATCHgdwm 3 struct AMOREneuron { int id, type, actf; int last_input_link, last_output_link; int * input_links; double * weights; struct AMOREneuron ** output_links; int * output_aims; double 
bias; double v0; double v1; int method; union { struct { double delta; double learning_rate; } adaptgd; struct { double delta; double learning_rate; double momentum; double * former_weight_change; double former_bias_change; } adaptgdwm; struct { double delta; double learning_rate; double * sum_delta_x; double sum_delta_bias; } batchgd; struct { double delta; double learning_rate; double * sum_delta_x; double sum_delta_bias; double momentum; double * former_weight_change; double former_bias_change; } batchgdwm; } method_dep_variables; }; struct AMOREnet { struct AMOREneuron *** layers; int last_layer; int * layer_size; struct AMOREneuron ** neurons; int last_neuron; double * input; int last_input; double * output; int last_output; double * target; struct { char name; double stao; } deltaE; }; struct AMOREnet * copynet_RC (SEXP net); void copynet_CR (SEXP net, struct AMOREnet * ptnet); AMORE/src/BATCHgd.c0000755000175100001440000001710710372726064013242 0ustar hornikusers/* ############################################################## # batchgdgd ( batchgd gradient descent without momentum ) ############################################################## */ #include #include #include #include #include #include #include "AMORE.h" /** ########################################################## # BATCH Gradient Descent (without momentum) ########################################################## **/ SEXP BATCHgd_loop_MLPnet (SEXP origNet, SEXP Ptrans, SEXP Ttrans, SEXP nepochs, SEXP rho) { int * Ptransdim, *Ttransdim, fila, columna, Pcounter, Tcounter; int considered_input, ind_neuron, ind_other_neuron, that_neuron, that_aim, ind_weight; double aux_DELTA, x_input, a, bias_change, weight_change; int epoch, n_epochs; SEXP R_fcall, args, arg1, arg2, arg3; SEXP aims, net; struct AMOREneuron * ptneuron, * pt_that_neuron; struct AMOREnet * ptnet; double aux1, aux2; PROTECT(net=duplicate(origNet)); Ptransdim = INTEGER(coerceVector(getAttrib(Ptrans, R_DimSymbol), INTSXP)); Ttransdim = INTEGER(coerceVector(getAttrib(Ttrans, R_DimSymbol), INTSXP)); n_epochs = INTEGER(nepochs)[0]; ptnet = copynet_RC(net); for (epoch=0; epoch < n_epochs; epoch++) { for (fila=0, Pcounter=0, Tcounter=0; fila < Ptransdim[1]; fila++) { for( columna =0; columna < Ptransdim[0] ; columna++, Pcounter++) { ptnet->input[columna] = REAL(Ptrans)[Pcounter]; } for( columna =0; columna < Ttransdim[0] ; columna++, Tcounter++) { ptnet->target[columna] = REAL(Ttrans)[Tcounter]; } /* BEGIN void batchgd_forward_mlpnet(AMOREnet * ptnet) */ for (ind_neuron=0; ind_neuron <= ptnet->last_neuron ; ind_neuron++ ) { ptneuron = ptnet->neurons[ind_neuron]; /* BEGIN batchgd_forward_MLPneuron */ for (a=0.0, ind_weight=0; ind_weight <= ptneuron->last_input_link; ind_weight++) { considered_input = ptneuron->input_links[ind_weight]; if (considered_input < 0 ) { x_input = ptnet->input[-1-considered_input]; } else { x_input = ptnet->neurons[-1+considered_input]->v0; } a += ptneuron->weights[ind_weight] * x_input; } a += ptneuron->bias; switch (ptneuron->actf) { case TANSIG_ACTF: ptneuron->v0 = a_tansig * tanh(a * b_tansig); ptneuron->v1 = b_tansig / a_tansig * (a_tansig - ptneuron->v0)*(a_tansig + ptneuron->v0); break; case SIGMOID_ACTF: ptneuron->v0 = 1/(1+exp(- a_sigmoid * a)) ; ptneuron->v1 = a_sigmoid * ptneuron->v0 * ( 1 - ptneuron->v0 ); break; case PURELIN_ACTF: ptneuron->v0 = a; ptneuron->v1 = 1; break; case HARDLIM_ACTF: if (a>=0) { ptneuron->v0 = 1.0; } else { ptneuron->v0 = 0.0; } ptneuron->v1 = NA_REAL; break; case CUSTOM_ACTF: PROTECT(args = 
allocVector(REALSXP,1)); REAL(args)[0] = a; PROTECT(R_fcall = lang2(VECTOR_ELT(VECTOR_ELT(NET_NEURONS, ind_neuron), id_F0), args)); ptneuron->v0 = REAL(eval (R_fcall, rho))[0]; PROTECT(args = allocVector(REALSXP,1)); REAL(args)[0] = a; PROTECT(R_fcall = lang2(VECTOR_ELT(VECTOR_ELT(NET_NEURONS, ind_neuron), id_F1), args)); ptneuron->v1 = REAL(eval (R_fcall, rho))[0]; UNPROTECT(4); break; } /* END batchgd_forward_MLPneuron */ } /* END void batchgd_forward_mlpnet(AMOREnet * ptnet) */ /* BEGIN void Parcial_batchgd_backwards_MLPnet (AMOREnet * ptnet, SEXP rho) */ for ( ind_neuron=ptnet->last_neuron; ind_neuron >=0; ind_neuron-- ) { ptneuron=ptnet->neurons[ind_neuron]; /**/ if (ptneuron->type==TYPE_OUTPUT) { switch(ptnet->deltaE.name) { case LMS_NAME: aux_DELTA = ptneuron->v0 - ptnet->target[-1+ptneuron->output_aims[0]]; break; case LMLS_NAME: aux_DELTA = ptneuron->v0- ptnet->target[-1+ptneuron->output_aims[0]]; aux_DELTA = aux_DELTA / (1 + aux_DELTA*aux_DELTA / 2); break; default: /* if (ptneuron->deltaE.name==TAO_NAME) de momento tao es como custom*/ /* ####### OJO FALTA cambiar el TAO */ PROTECT(args = allocVector(VECSXP,3) ); PROTECT(arg3 = net ); PROTECT(arg2 = allocVector(REALSXP,1) ); PROTECT(arg1 = allocVector(REALSXP,1) ); REAL(arg1)[0] = ptneuron->v0; REAL(arg2)[0] = ptnet->target[-1+ptneuron->output_aims[0]]; SET_VECTOR_ELT(args, 0, arg1); SET_VECTOR_ELT(args, 1, arg2); SET_VECTOR_ELT(args, 2, arg3); PROTECT(R_fcall = lang2(DELTAE_F, args) ); aux_DELTA = REAL(eval (R_fcall, rho))[0]; UNPROTECT(5); break; }; } else { aux_DELTA = 0.0; for ( ind_other_neuron=0; ind_other_neuron <= ptneuron->last_output_link ; ind_other_neuron++ ) { pt_that_neuron = ptneuron->output_links[ind_other_neuron]; that_aim = -1+ptneuron->output_aims[ind_other_neuron]; aux_DELTA += pt_that_neuron->method_dep_variables.batchgd.delta * pt_that_neuron->weights[that_aim] ; } } ptneuron->method_dep_variables.batchgd.delta = aux_DELTA * ptneuron->v1; for (ind_weight = 0; ind_weight <= ptneuron->last_input_link; ind_weight++) { considered_input = ptneuron->input_links[ind_weight]; if (considered_input < 0 ) { x_input = ptnet->input[-1-considered_input]; } else { x_input = ptnet->neurons[-1+considered_input]->v0; } ptneuron->method_dep_variables.batchgd.sum_delta_x[ind_weight] += ptneuron->method_dep_variables.batchgd.delta * x_input ; } ptneuron->method_dep_variables.batchgd.sum_delta_bias += ptneuron->method_dep_variables.batchgd.delta ; } /*/ End parcial backwards*/ } /* end bucle fila */ /** BEGIN UPDATEWEIGHTS */ for (ind_neuron=0; ind_neuron <= ptnet->last_neuron ; ind_neuron++ ) { ptneuron = ptnet->neurons[ind_neuron]; bias_change = - ptneuron->method_dep_variables.batchgd.learning_rate * ptneuron->method_dep_variables.batchgd.sum_delta_bias; ptneuron->bias += bias_change; for (ind_weight = 0; ind_weight <= ptneuron->last_input_link; ind_weight++) { weight_change = - ptneuron->method_dep_variables.batchgd.learning_rate * ptneuron->method_dep_variables.batchgd.sum_delta_x[ind_weight] ; ptneuron->weights[ind_weight] += weight_change; } /**/ } /* END UPDATE WEIGHTS */ } /* end epoch loop*/ copynet_CR (net, ptnet); UNPROTECT(1); return (net); } AMORE/src/BATCHgdwm.c0000755000175100001440000001754110372726064013610 0ustar hornikusers/** ############################################################## # batchgdwm ( BATCH gradient descent WITH momentum ) ############################################################## */ #include #include #include #include #include #include #include "AMORE.h" SEXP BATCHgdwm_loop_MLPnet (SEXP 
origNet, SEXP Ptrans, SEXP Ttrans, SEXP nepochs, SEXP rho) { int * Ptransdim, *Ttransdim, fila, columna, Pcounter, Tcounter; int considered_input, ind_neuron, ind_other_neuron, that_neuron, that_aim, ind_weight; double aux_DELTA, x_input, a, bias_change, weight_change; int epoch, n_epochs; SEXP R_fcall, args, arg1, arg2, arg3; SEXP aims, net; struct AMOREneuron * ptneuron, * pt_that_neuron; struct AMOREnet * ptnet; double aux1, aux2; PROTECT(net=duplicate(origNet)); Ptransdim = INTEGER(coerceVector(getAttrib(Ptrans, R_DimSymbol), INTSXP)); Ttransdim = INTEGER(coerceVector(getAttrib(Ttrans, R_DimSymbol), INTSXP)); n_epochs = INTEGER(nepochs)[0]; ptnet = copynet_RC(net); for (epoch=0; epoch < n_epochs; epoch++) { for (fila=0, Pcounter=0, Tcounter=0; fila < Ptransdim[1]; fila++) { for( columna =0; columna < Ptransdim[0] ; columna++, Pcounter++) { ptnet->input[columna] = REAL(Ptrans)[Pcounter]; } for( columna =0; columna < Ttransdim[0] ; columna++, Tcounter++) { ptnet->target[columna] = REAL(Ttrans)[Tcounter]; } /* BEGIN void batchgdwm_forward_mlpnet(AMOREnet * ptnet) */ for (ind_neuron=0; ind_neuron <= ptnet->last_neuron ; ind_neuron++ ) { ptneuron = ptnet->neurons[ind_neuron]; /* BEGIN batchgdwm_forward_MLPneuron */ for (a=0.0, ind_weight=0; ind_weight <= ptneuron->last_input_link; ind_weight++) { considered_input = ptneuron->input_links[ind_weight]; if (considered_input < 0 ) { x_input = ptnet->input[-1-considered_input]; } else { x_input = ptnet->neurons[-1+considered_input]->v0; } a += ptneuron->weights[ind_weight] * x_input; } a += ptneuron->bias; switch (ptneuron->actf) { case TANSIG_ACTF: ptneuron->v0 = a_tansig * tanh(a * b_tansig); ptneuron->v1 = b_tansig / a_tansig * (a_tansig - ptneuron->v0)*(a_tansig + ptneuron->v0); break; case SIGMOID_ACTF: ptneuron->v0 = 1/(1+exp(- a_sigmoid * a)) ; ptneuron->v1 = a_sigmoid * ptneuron->v0 * ( 1 - ptneuron->v0 ); break; case PURELIN_ACTF: ptneuron->v0 = a; ptneuron->v1 = 1; break; case HARDLIM_ACTF: if (a>=0) { ptneuron->v0 = 1.0; } else { ptneuron->v0 = 0.0; } ptneuron->v1 = NA_REAL; break; case CUSTOM_ACTF: PROTECT(args = allocVector(REALSXP,1)); REAL(args)[0] = a; PROTECT(R_fcall = lang2(VECTOR_ELT(VECTOR_ELT(NET_NEURONS, ind_neuron), id_F0), args)); ptneuron->v0 = REAL(eval (R_fcall, rho))[0]; PROTECT(args = allocVector(REALSXP,1)); REAL(args)[0] = a; PROTECT(R_fcall = lang2(VECTOR_ELT(VECTOR_ELT(NET_NEURONS, ind_neuron), id_F1), args)); ptneuron->v1 = REAL(eval (R_fcall, rho))[0]; UNPROTECT(4); break; } /* END batchgdwm_forward_MLPneuron */ } /* END void batchgdwm_forward_mlpnet(AMOREnet * ptnet) */ /* BEGIN void Parcial_batchgdwm_backwards_MLPnet (AMOREnet * ptnet, SEXP rho) */ for ( ind_neuron=ptnet->last_neuron; ind_neuron >=0; ind_neuron-- ) { ptneuron=ptnet->neurons[ind_neuron]; /**/ if (ptneuron->type==TYPE_OUTPUT) { switch(ptnet->deltaE.name) { case LMS_NAME: aux_DELTA = ptneuron->v0 - ptnet->target[-1+ptneuron->output_aims[0]]; break; case LMLS_NAME: aux_DELTA = ptneuron->v0- ptnet->target[-1+ptneuron->output_aims[0]]; aux_DELTA = aux_DELTA / (1 + aux_DELTA*aux_DELTA / 2); break; default: /* if (ptneuron->deltaE.name==TAO_NAME) de momento tao es como custom*/ /* ####### OJO FALTA cambiar el TAO */ PROTECT(args = allocVector(VECSXP,3) ); PROTECT(arg3 = net ); PROTECT(arg2 = allocVector(REALSXP,1) ); PROTECT(arg1 = allocVector(REALSXP,1) ); REAL(arg1)[0] = ptneuron->v0; REAL(arg2)[0] = ptnet->target[-1+ptneuron->output_aims[0]]; SET_VECTOR_ELT(args, 0, arg1); SET_VECTOR_ELT(args, 1, arg2); SET_VECTOR_ELT(args, 2, arg3); 
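/* custom/TAO criterion: the (prediction, target, net) argument list built above is passed to the user-supplied deltaE closure, which is evaluated in R to obtain the influence value */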
PROTECT(R_fcall = lang2(DELTAE_F, args) ); aux_DELTA = REAL(eval (R_fcall, rho))[0]; UNPROTECT(5); break; }; } else { aux_DELTA = 0.0; for ( ind_other_neuron=0; ind_other_neuron <= ptneuron->last_output_link ; ind_other_neuron++ ) { pt_that_neuron = ptneuron->output_links[ind_other_neuron]; that_aim = -1+ptneuron->output_aims[ind_other_neuron]; aux_DELTA += pt_that_neuron->method_dep_variables.batchgdwm.delta * pt_that_neuron->weights[that_aim] ; } } ptneuron->method_dep_variables.batchgdwm.delta = aux_DELTA * ptneuron->v1; for (ind_weight = 0; ind_weight <= ptneuron->last_input_link; ind_weight++) { considered_input = ptneuron->input_links[ind_weight]; if (considered_input < 0 ) { x_input = ptnet->input[-1-considered_input]; } else { x_input = ptnet->neurons[-1+considered_input]->v0; } ptneuron->method_dep_variables.batchgdwm.sum_delta_x[ind_weight] += ptneuron->method_dep_variables.batchgdwm.delta * x_input ; } ptneuron->method_dep_variables.batchgdwm.sum_delta_bias += ptneuron->method_dep_variables.batchgdwm.delta ; } /*/ End parcial backwards*/ } /* end bucle fila */ /** BEGIN UPDATEWEIGHTS */ for (ind_neuron=0; ind_neuron <= ptnet->last_neuron ; ind_neuron++ ) { ptneuron = ptnet->neurons[ind_neuron]; bias_change = ptneuron->method_dep_variables.batchgdwm.momentum * ptneuron->method_dep_variables.batchgdwm.former_bias_change - ptneuron->method_dep_variables.batchgdwm.learning_rate * ptneuron->method_dep_variables.batchgdwm.sum_delta_bias; ptneuron->method_dep_variables.batchgdwm.former_bias_change = bias_change ; ptneuron->bias += bias_change; for (ind_weight = 0; ind_weight <= ptneuron->last_input_link; ind_weight++) { weight_change = ptneuron->method_dep_variables.batchgdwm.momentum * ptneuron->method_dep_variables.batchgdwm.former_weight_change[ind_weight] - ptneuron->method_dep_variables.batchgdwm.learning_rate * ptneuron->method_dep_variables.batchgdwm.sum_delta_x[ind_weight] ; ptneuron->method_dep_variables.batchgdwm.former_weight_change[ind_weight] = weight_change ; ptneuron->weights[ind_weight] += weight_change; } /**/ } /* END UPDATE WEIGHTS */ } /* end epoch loop*/ copynet_CR (net, ptnet); UNPROTECT(1); return (net); } AMORE/src/copynet.c0000644000175100001440000002627110372726064013566 0ustar hornikusers #include #include #include #include #include #include #include "AMORE.h" struct AMOREnet * copynet_RC (SEXP net); void copynet_CR (SEXP net, struct AMOREnet * ptnet); /** ########################################################## # copynet_RC # Copies the SEXP net to the *ptnet ########################################################## **/ struct AMOREnet * copynet_RC (SEXP net) { struct AMOREnet * ptnet; struct AMOREneuron * ptneuron; int i, ind_neuron, ind_input_neuron, ind_output_neuron, ind_layer; SEXP neuron; int aux_neuron; ptnet = (struct AMOREnet *) R_alloc(1, sizeof(struct AMOREnet)); ptnet->last_neuron = -1+LENGTH(NET_NEURONS); ptnet->last_input = -1+LENGTH(NET_INPUT); ptnet->last_output = -1+LENGTH(NET_OUTPUT); ptnet->input = (double *) R_alloc(ptnet->last_input + 1, sizeof(double)); ptnet->output = (double *) R_alloc(ptnet->last_output + 1, sizeof(double)); ptnet->target = (double *) R_alloc(ptnet->last_output + 1, sizeof(double)); for (i=0; i <= ptnet->last_input; i++) { ptnet->input[i] = REAL(NET_INPUT)[i]; } for (i=0; i <= ptnet->last_output; i++) { ptnet->output[i] = REAL(NET_OUTPUT)[i]; ptnet->target[i] = REAL(NET_OUTPUT)[i]; } ptnet->deltaE.name = INTEGER(DELTAE_NAME)[0]; ptnet->deltaE.stao = REAL(DELTAE_STAO)[0]; ptnet->neurons = (struct AMOREneuron **) 
R_alloc(ptnet->last_neuron + 1, sizeof(struct AMOREneuron *)); for (ind_neuron=0; ind_neuron <= ptnet->last_neuron; ind_neuron ++ ) { ptnet->neurons[ind_neuron] = (struct AMOREneuron *) R_alloc(1, sizeof(struct AMOREneuron)); /* do not join with the following block*/ } for (ind_neuron=0; ind_neuron <= ptnet->last_neuron; ind_neuron ++ ) { PROTECT(neuron=VECTOR_ELT(NET_NEURONS, ind_neuron ) ); ptneuron = ptnet->neurons[ind_neuron]; ptneuron->id = INTEGER(ID)[0]; if (strcmp(CHAR(STRING_ELT(TYPE,0)),"output")==0) { ptneuron->type = TYPE_OUTPUT; } else { ptneuron->type = TYPE_HIDDEN; } ptneuron->actf = INTEGER(ACTIVATION_FUNCTION)[0] ; ptneuron->last_output_link = -1 + LENGTH(OUTPUT_LINKS) ; ptneuron->last_input_link = -1 + LENGTH(INPUT_LINKS) ; ptneuron->output_aims = (int *) R_alloc(ptneuron->last_output_link+1, sizeof(int)); ptneuron->input_links = (int *) R_alloc(ptneuron->last_input_link+1, sizeof(int)); ptneuron->output_links = (struct AMOREneuron **) R_alloc(ptneuron->last_output_link+1, sizeof(struct AMOREneuron *)); ptneuron->weights = (double *) R_alloc(ptneuron->last_input_link+1, sizeof(double)); for (ind_input_neuron=0; ind_input_neuron <= ptneuron->last_input_link; ind_input_neuron++) { ptneuron->input_links[ind_input_neuron] = INTEGER(INPUT_LINKS)[ind_input_neuron]; ptneuron->weights[ind_input_neuron] = REAL(WEIGHTS)[ind_input_neuron]; } for (ind_output_neuron=0; ind_output_neuron <= ptneuron->last_output_link; ind_output_neuron++) { ptneuron->output_aims[ind_output_neuron] = INTEGER(OUTPUT_AIMS)[0]; if(INTEGER(OUTPUT_LINKS)[ind_output_neuron]==NA_INTEGER){ ptneuron->output_links[ind_output_neuron] = NULL; } else { ptneuron->output_links[ind_output_neuron] = ptnet->neurons[-1+INTEGER(OUTPUT_LINKS)[ind_output_neuron]]; } } ptneuron->bias = REAL(BIAS)[0]; ptneuron->v0 = REAL(V0)[0]; ptneuron->v1 = REAL(V1)[0]; if (strcmp(CHAR(STRING_ELT(METHOD,0)),"ADAPTgd")==0) { ptneuron->method = METHOD_ADAPTgd; ptneuron->method_dep_variables.adaptgd.delta = REAL(ADAPTgd_DELTA)[0] ; ptneuron->method_dep_variables.adaptgd.learning_rate = REAL(ADAPTgd_LEARNING_RATE)[0] ; } else if (strcmp(CHAR(STRING_ELT(METHOD,0)),"ADAPTgdwm")==0) { ptneuron->method = METHOD_ADAPTgdwm; ptneuron->method_dep_variables.adaptgdwm.delta = REAL(ADAPTgdwm_DELTA)[0] ; ptneuron->method_dep_variables.adaptgdwm.learning_rate = REAL(ADAPTgdwm_LEARNING_RATE)[0] ; ptneuron->method_dep_variables.adaptgdwm.momentum = REAL(ADAPTgdwm_MOMENTUM)[0] ; ptneuron->method_dep_variables.adaptgdwm.former_bias_change = REAL(ADAPTgdwm_FORMER_BIAS_CHANGE)[0]; ptneuron->method_dep_variables.adaptgdwm.former_weight_change = (double *) R_alloc(ptneuron->last_input_link+1, sizeof(double)); for (ind_input_neuron=0; ind_input_neuron <= ptneuron->last_input_link; ind_input_neuron++) { ptneuron->method_dep_variables.adaptgdwm.former_weight_change[ind_input_neuron] = REAL(ADAPTgdwm_FORMER_WEIGHT_CHANGE)[ind_input_neuron] ; } } else if (strcmp(CHAR(STRING_ELT(METHOD,0)),"BATCHgd")==0) { ptneuron->method = METHOD_BATCHgd; ptneuron->method_dep_variables.batchgd.delta = REAL(BATCHgd_DELTA)[0] ; ptneuron->method_dep_variables.batchgd.learning_rate = REAL(BATCHgd_LEARNING_RATE)[0] ; ptneuron->method_dep_variables.batchgd.sum_delta_x = (double *) R_alloc(ptneuron->last_input_link+1, sizeof(double)); for (ind_input_neuron=0; ind_input_neuron <= ptneuron->last_input_link; ind_input_neuron++) { ptneuron->method_dep_variables.batchgd.sum_delta_x[ind_input_neuron] = REAL(BATCHgd_SUM_DELTA_X)[ind_input_neuron] ; } 
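/* the accumulated bias delta is mirrored from the R object in the same way as the per-weight accumulators above */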
ptneuron->method_dep_variables.batchgd.sum_delta_bias = REAL(BATCHgd_SUM_DELTA_BIAS)[0] ; } else if (strcmp(CHAR(STRING_ELT(METHOD,0)),"BATCHgdwm")==0) { ptneuron->method = METHOD_BATCHgdwm; ptneuron->method_dep_variables.batchgdwm.delta = REAL(BATCHgdwm_DELTA)[0] ; ptneuron->method_dep_variables.batchgdwm.learning_rate = REAL(BATCHgdwm_LEARNING_RATE)[0] ; ptneuron->method_dep_variables.batchgdwm.sum_delta_x = (double *) R_alloc(ptneuron->last_input_link+1, sizeof(double)); for (ind_input_neuron=0; ind_input_neuron <= ptneuron->last_input_link; ind_input_neuron++) { ptneuron->method_dep_variables.batchgdwm.sum_delta_x[ind_input_neuron] = REAL(BATCHgdwm_SUM_DELTA_X)[ind_input_neuron] ; } ptneuron->method_dep_variables.batchgdwm.sum_delta_bias = REAL(BATCHgdwm_SUM_DELTA_BIAS)[0] ; ptneuron->method_dep_variables.batchgdwm.momentum = REAL(BATCHgdwm_MOMENTUM)[0] ; ptneuron->method_dep_variables.batchgdwm.former_bias_change = REAL(BATCHgdwm_FORMER_BIAS_CHANGE)[0] ; ptneuron->method_dep_variables.batchgdwm.former_weight_change = (double *) R_alloc(ptneuron->last_input_link+1, sizeof(double)); for (ind_input_neuron=0; ind_input_neuron <= ptneuron->last_input_link; ind_input_neuron++) { ptneuron->method_dep_variables.batchgdwm.former_weight_change[ind_input_neuron] = REAL(BATCHgdwm_FORMER_WEIGHT_CHANGE)[ind_input_neuron] ; } } UNPROTECT(1); } ptnet->last_layer = -2+LENGTH(NET_LAYERS); /* the first one doesn't count */ ptnet->layer_size = (int *) R_alloc(ptnet->last_layer + 1, sizeof(int)); ptnet->layers = (struct AMOREneuron ***) R_alloc(1+ptnet->last_layer, sizeof(struct AMOREneuron **)); for (ind_layer=0; ind_layer <= ptnet->last_layer ; ind_layer++) { ptnet->layer_size[ind_layer] = LENGTH(VECTOR_ELT(NET_LAYERS, 1+ind_layer)); ptnet->layers[ind_layer] = (struct AMOREneuron **) R_alloc(ptnet->layer_size[ind_layer], sizeof(struct AMOREneuron *)); for (ind_neuron=0; ind_neuron < ptnet->layer_size[ind_layer]; ind_neuron++) { aux_neuron = -1+INTEGER(VECTOR_ELT(NET_LAYERS, 1+ind_layer))[ind_neuron]; ptnet->layers[ind_layer][ind_neuron] = ptnet->neurons[ aux_neuron ]; } } return (ptnet); } /** ################################ # copynet_CR # Copies *ptnet to SEXP net ################################ **/ void copynet_CR (SEXP net, struct AMOREnet * ptnet){ struct AMOREneuron * ptneuron; int ind_neuron, ind_input_neuron, ind_output_neuron, ind_weight; SEXP neuron; REAL(DELTAE_STAO)[0] = ptnet->deltaE.stao ; for (ind_neuron=0; ind_neuron <= ptnet->last_neuron; ind_neuron ++ ) { PROTECT(neuron=VECTOR_ELT(NET_NEURONS, ind_neuron ) ); ptneuron = ptnet->neurons[ind_neuron]; for (ind_input_neuron=0; ind_input_neuron <= ptneuron->last_input_link; ind_input_neuron++) { REAL(WEIGHTS)[ind_input_neuron] = ptneuron->weights[ind_input_neuron] ; } REAL(BIAS)[0] = ptneuron->bias ; REAL(V0)[0] = ptneuron->v0 ; REAL(V1)[0] = ptneuron->v1 ; switch(ptneuron->method) { case METHOD_ADAPTgd : REAL(ADAPTgd_DELTA)[0] = ptneuron->method_dep_variables.adaptgd.delta ; REAL(ADAPTgd_LEARNING_RATE)[0] = ptneuron->method_dep_variables.adaptgd.learning_rate; break; case METHOD_ADAPTgdwm: REAL(ADAPTgdwm_DELTA)[0] = ptneuron->method_dep_variables.adaptgdwm.delta; REAL(ADAPTgdwm_LEARNING_RATE)[0] = ptneuron->method_dep_variables.adaptgdwm.learning_rate ; REAL(ADAPTgdwm_MOMENTUM)[0] = ptneuron->method_dep_variables.adaptgdwm.momentum ; REAL(ADAPTgdwm_FORMER_BIAS_CHANGE)[0] = ptneuron->method_dep_variables.adaptgdwm.former_bias_change ; for (ind_weight=0; ind_weight <= ptneuron->last_input_link; ind_weight++) { 
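/* write each per-weight momentum term back into the R neuron so that the updated training state survives the .Call */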
REAL(ADAPTgdwm_FORMER_WEIGHT_CHANGE)[ind_weight] = ptneuron->method_dep_variables.adaptgdwm.former_weight_change[ind_weight]; } break; case METHOD_BATCHgd: REAL(BATCHgd_DELTA)[0] = ptneuron->method_dep_variables.batchgd.delta ; REAL(BATCHgd_LEARNING_RATE)[0] = ptneuron->method_dep_variables.batchgd.learning_rate ; for (ind_weight=0; ind_weight <= ptneuron->last_input_link; ind_weight++) { REAL(BATCHgd_SUM_DELTA_X)[ind_weight] = ptneuron->method_dep_variables.batchgd.sum_delta_x[ind_weight]; } REAL(BATCHgd_SUM_DELTA_BIAS)[0] = ptneuron->method_dep_variables.batchgd.sum_delta_bias ; break; default: REAL(BATCHgdwm_DELTA)[0] = ptneuron->method_dep_variables.batchgdwm.delta ; REAL(BATCHgdwm_LEARNING_RATE)[0] = ptneuron->method_dep_variables.batchgdwm.learning_rate ; for (ind_weight=0; ind_weight <= ptneuron->last_input_link; ind_weight++) { REAL(BATCHgdwm_SUM_DELTA_X)[ind_weight] = ptneuron->method_dep_variables.batchgdwm.sum_delta_x[ind_weight]; } REAL(BATCHgdwm_SUM_DELTA_BIAS)[0] = ptneuron->method_dep_variables.batchgdwm.sum_delta_bias; REAL(BATCHgdwm_MOMENTUM)[0] = ptneuron->method_dep_variables.batchgdwm.momentum ; REAL(BATCHgdwm_FORMER_BIAS_CHANGE)[0] = ptneuron->method_dep_variables.batchgdwm.former_bias_change ; for (ind_weight=0; ind_weight <= ptneuron->last_input_link; ind_weight++) { REAL(BATCHgdwm_FORMER_WEIGHT_CHANGE)[ind_weight] = ptneuron->method_dep_variables.batchgdwm.former_weight_change[ind_weight]; } break; } UNPROTECT(1); } return ; } AMORE/src/sim.c0000755000175100001440000001614710701376253012676 0ustar hornikusers#include #include #include #include #include #include #include "AMORE.h" /******************************************************************************************************************/ SEXP sim_Forward_MLPnet (SEXP net, SEXP Ptrans, SEXP ytrans, SEXP rho) { int * Ptransdim, *ytransdim, fila, columna, Pcounter, ycounter; int considered_input, ind_neuron, ind_other_neuron, that_neuron, that_aim, ind_weight; double x_input, a; int epoch, n_epochs; SEXP R_fcall, args, arg1, arg2, arg3; SEXP aims; struct AMOREneuron * ptneuron, * pt_that_neuron; struct AMOREnet * ptnet; double aux1, aux2; Ptransdim = INTEGER(coerceVector(getAttrib(Ptrans, R_DimSymbol), INTSXP)); ytransdim = INTEGER(coerceVector(getAttrib(ytrans, R_DimSymbol), INTSXP)); ptnet = copynet_RC(net); for (fila=0, Pcounter=0, ycounter=0; fila < Ptransdim[1]; fila++) { for( columna =0; columna < Ptransdim[0] ; columna++, Pcounter++) { ptnet->input[columna] = REAL(Ptrans)[Pcounter]; } for (ind_neuron=0; ind_neuron <= ptnet->last_neuron ; ind_neuron++ ) { ptneuron = ptnet->neurons[ind_neuron]; for (a=0.0, ind_weight=0; ind_weight <= ptneuron->last_input_link; ind_weight++) { considered_input = ptneuron->input_links[ind_weight]; if (considered_input < 0 ) { x_input = ptnet->input[-1-considered_input]; } else { x_input = ptnet->neurons[-1+considered_input]->v0; } a += ptneuron->weights[ind_weight] * x_input; } a += ptneuron->bias; switch (ptneuron->actf) { case TANSIG_ACTF: ptneuron->v0 = a_tansig * tanh(a * b_tansig); break; case SIGMOID_ACTF: ptneuron->v0 = 1/(1+exp(- a_sigmoid * a)) ; break; case PURELIN_ACTF: ptneuron->v0 = a; break; case HARDLIM_ACTF: if (a>=0) { ptneuron->v0 = 1.0; } else { ptneuron->v0 = 0.0; } break; case CUSTOM_ACTF: PROTECT(args = allocVector(REALSXP,1)); REAL(args)[0] = a; PROTECT(R_fcall = lang2(VECTOR_ELT(VECTOR_ELT(NET_NEURONS, ind_neuron), id_F0), args)); ptneuron->v0 = REAL(eval (R_fcall, rho))[0]; UNPROTECT(2); break; } } for (ind_neuron=0; ind_neuron < ytransdim[0] ; 
ind_neuron++ ) { REAL(ytrans)[ycounter++] = ptnet->layers[ptnet->last_layer][ind_neuron]->v0; } } return (ytrans); } /******************************************************************************************************************/ void print_MLPneuron (SEXP neuron) { int i; Rprintf("***********************************************************\n"); /* ID */ Rprintf("ID:\t\t\t%d \n", INTEGER(ID)[0] ); /* TYPE */ Rprintf("TYPE:\t\t\t%s \n", CHAR(STRING_ELT(TYPE,0)) ); /* ACTIVATION FUNCTION */ Rprintf("ACT. FUNCTION:\t\t%s\n", CHAR(STRING_ELT(ACTIVATION_FUNCTION,0)) ); /* OUTPUT LINKS */ if (INTEGER(OUTPUT_LINKS)[0] != NA_INTEGER ) { for (i=0; i