evonet.cpp
50 // MAXN and MAXSTOREDACTIVATIONS are declared in .h and their value is set there, but that is not a
51 // definition. This means that if you try to get the address of MAXN or MAXSTOREDACTIVATIONS you get
52 // a linker error (this also happens if you pass one of them to a function that takes a reference
63 : neuronsMonitorUploader(20, DataUploader<ActivationsToGui>::IncreaseQueueSize /*BlockUploader*/) // we can be ahead of GUI by at most 20 steps, then we are blocked
138 memcpy(&(e->storedActivations[0][0]), &(this->storedActivations[0][0]), MAXSTOREDACTIVATIONS * MAXN * sizeof(float));
186 Logger::error( "Evonet - The information inside netFile will override any specification in all others parameters of Evonet" );
188 wrange = ConfigurationHelper::getDouble(params, prefix + "weightRange", 5.0); // the range of synaptic weights
189 grange = ConfigurationHelper::getDouble(params, prefix + "gainRange", 5.0); // the range of gains
190 brange = ConfigurationHelper::getDouble(params, prefix + "biasRange", 5.0); // the range of biases
191 showTInput = ConfigurationHelper::getBool(params, prefix + "showTeachingInput", false); // flag for showing teaching input
202 ConfigurationHelper::throwUserConfigError(prefix + "(nSensors + nHiddens + nMotors)", QString::number(nneurons), "Too many neurons: increase MAXN to support more than " + QString::number(MAXN) + " neurons");
211 ConfigurationHelper::throwUserConfigError(prefix + "inputNeuronType", str, "Wrong value (use \"no_delta\" or \"with_delta\"");
224 ConfigurationHelper::throwUserConfigError(prefix + "hiddenNeuronType", str, "Wrong value (use \"logistic\", \"logistic+delta\", \"binary\" or \"logistic_0.2\"");
233 ConfigurationHelper::throwUserConfigError(prefix + "outputNeuronType", str, "Wrong value (use \"no_delta\" or \"with_delta\"");
235 bool recurrentHiddens = ConfigurationHelper::getBool( params, prefix + "recurrentHiddens", false );
236 bool inputOutputConnections = ConfigurationHelper::getBool( params, prefix + "inputOutputConnections", false );
237 bool recurrentOutputs = ConfigurationHelper::getBool( params, prefix + "recurrentOutputs", false );
238 bool biasOnHidden = ConfigurationHelper::getBool( params, prefix + "biasOnHiddenNeurons", false );
239 bool biasOnOutput = ConfigurationHelper::getBool( params, prefix + "biasOnOutputNeurons", false );
240 create_net_block( inputNeuronType, hiddenNeuronType, outputNeuronType, recurrentHiddens, inputOutputConnections, recurrentOutputs, biasOnHidden, biasOnOutput );
244 ConfigurationHelper::throwUserConfigError(prefix + "netFile", netFile, "Could not open the specified network configuration file");
251 freep=new float[nparameters+1000]; // we allocate more space to handle network variations introduced by the user
256 backpropfreep=new float[nparameters+1000]; // we allocate more space to handle network variations introduced by the user
261 copybackpropfreep=new float[nparameters+1000]; // we allocate more space to handle network variations introduced by the user
266 phep=new float[nparameters+1000]; // we allocate more space to handle network variations introduced by the user
271 muts=new float[nparameters+1000]; // we allocate more space to handle network variations introduced by the user
328 d.describeString( "netFile" ).help( "The file .net where is defined the architecture to load. WARNING: when this parameter is specified any other parameters will be ignored" );
329 d.describeReal( "weightRange" ).def(5.0f).limits(1,+Infinity).help( "The synpatic weight of the neural network can only assume values in [-weightRange, +weightRange]" );
330 d.describeReal( "gainRange" ).def(5.0f).limits(0,+Infinity).help( "The gain of a neuron will can only assume values in [0, +gainRange]" );
331 d.describeReal( "biasRange" ).def(5.0f).limits(0,+Infinity).help( "The bias of a neuron will can only assume values in [-biasRange, +biasRange]" );
332 d.describeEnum( "inputNeuronType" ).def("no_delta").values( QStringList() << "no_delta" << "with_delta" ).help( "The type of input neurons when the network is auto generated");
333 d.describeEnum( "hiddenNeuronType" ).def("logistic").values( QStringList() << "logistic" << "logistic+delta" << "binary" << "logistic_0.2" ).help( "The type of hidden neurons when the network is auto generated");
334 d.describeEnum( "outputNeuronType" ).def("no_delta").values( QStringList() << "no_delta" << "with_delta" ).help( "The type of output neurons when the network is auto generated");
335 d.describeBool( "recurrentHiddens" ).def(false).help( "when true generated a network with recurrent hidden neurons");
336 d.describeBool( "inputOutputConnections" ).def(false).help( "when true generated a network with input-output connections in addition to input-hidden-output connections");
337 d.describeBool( "recurrentOutputs" ).def(false).help( "when true generated a network with recurrent output neurons");
338 d.describeBool( "biasOnHiddenNeurons" ).def(false).help( "when true generate a network with hidden neurons with a bias");
339 d.describeBool( "biasOnOutputNeurons" ).def(false).help( "when true generate a network with output neurons with a bias");
340 d.describeBool( "showTeachingInput" ).def(false).help( "Whether the teaching input has to be shown in the UI");
348 void Evonet::create_net_block( int inputNeuronType, int hiddenNeuronType, int outputNeuronType, bool recurrentHiddens, bool inputOutputConnections, bool recurrentOutputs, bool biasOnHidden, bool biasOnOutput )
495 // calculate the height and width necessary to display all created neurons (drawnymax, drawnxmax)
536 Logger::error( "Evonet - increase MAXN to support more than "+QString::number(MAXN)+" neurons" );
541 fscanf(fp,"%d %d %d %d %d %d", &net_block[b][0],&net_block[b][1],&net_block[b][2],&net_block[b][3],&net_block[b][4], &net_block[b][5]);
557 fscanf(fp,"%d %d %d %d %d %d\n", &neuronbias[n], &neurontype[n], &neurongain[n], &neuronxy[n][0], &neuronxy[n][1], &neurondisplay[n]);
570 Logger::error(QString("ERROR: parameters defined are %1 while %2 contains %3 parameters").arg(nparameters).arg(filename).arg(np));
693 fprintf(fp,"%d %d %d %d %d %d", net_block[b][0],net_block[b][1],net_block[b][2],net_block[b][3],net_block[b][4],net_block[b][5]);
706 fprintf(fp,"%d %d %d %d %d %d\n", neuronbias[n], neurontype[n], neurongain[n], neuronxy[n][0], neuronxy[n][1], neurondisplay[n]);
823 Logger::warning( "Evonet - neuron " + QString::number(i) + " will never be activated according to the current architecture" );
827 Logger::warning( "Evonet - neuron " + QString::number(i) + " will be activated more than once according to the current architecture" );
864 // printf("netinput[%d]= [%ld] %f/%f*%f = %f*%f = %f",i,p-freep, *p,wrange,brange,((double)*p/wrange),brange,((double)*p/wrange)*brange);
879 // printf("netinput[%d] += act[%d] * gain[%d] * %f += %f * %f * %f += %f = %f\n",t,i,i,*p,act[i],gain[i], *p,act[i] * gain[i] * *p, netinput[t] );
1273 void Evonet::initWeightsInRange(float minBias, float maxBias, float minWeight, float maxWeight)
1497 bpdebug("\terror[%d]= gradient*(tInput[i - net_block[%d][3]]-act[%d]) = act[%d]*(1-act[%d]) * (tInput[%d - %d = %d]-act[%d]) = %f*(1-%f) * (%f - %f) = %f * %f = %f\n",i,b,i,i,i,i,net_block[b][1],i-net_block[b][1],i,act[i],act[i],tInput[i-net_block[b][1]],act[i],act[i]*(1-act[i]),(tInput[i-net_block[b][1]] - act[i]), error[i]);
1533 bpdebug("\tnetblock[%d][3]+net_block[%d][4] -1 = %d > %d => Computing the error\n", b,b,net_block[b][3]+net_block[b][4]-1, ninputs);
1542 error[i]+= error[j] * freep[ offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][2]];
1544 bpdebug("\t\terror[%d] += act[j] * freep[ offset[p] + (i-net_block[b][3]) + j*net_block[b][2] ] = act[%d] * freep[ %d + %d + %d*%d ] = %f * %f = %f\n", i,j,offsets[p],(i-net_block[b][3]),(j-net_block[b][1]),net_block[b][2], act[j], freep[ offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][2]], act[j]*freep[ offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][2]] );
1572 freep[ offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][4]] += delta[j] * act[i];
1574 bpdebug("\t\t\tfreep[ offset[%d] + (i-net_block[%d][3]) + j*net_block[%d][2] ] = freep[ %d + %d + %d*%d = %d ] += delta[%d] * act[%d] = %f * %f = %f\n", p, b,b, offsets[p], i-net_block[b][3], j-net_block[b][1],net_block[b][4],offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][4],j,i,delta[j],act[i],delta[j]*act[i] );
1575 bpdebug("\t\t\tfreep[ %d ] = %f\n", offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][4], freep[offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][4]]);
1601 float Evonet::computeMeanSquaredError(QVector<float> trainingSet, QVector<float> desiredOutput) {
1628 // printf("d[%d] - act[%d] = %f - %f = %f\n",ptr-1,j+ninputs+nhiddens,desiredOutput[ptr-1 ], act[j+ninputs+nhiddens], tmp);
1746 if(!(trainingHiddenBlock[net_block[b][3]][i] && inRange(net_block[b][1], ninputs+nhiddens, noutputs) )) {
1759 debug("Adding connection %d of output %d (freep[%d]) in w[%d]\n",j,i_freep,(i_freep-net_block[b][1])*net_block[b][4] + paramPtr,wPtr);
1872 if(!(trainingHiddenBlock[net_block[b][3]][i] && inRange(net_block[b][1], ninputs+nhiddens, noutputs) )) {
1883 debug("Adding connection %d of output %d (w[%d]) in freep[%d]\n",j,i_freep,wPtr,(i_freep-net_block[b][1])*net_block[b][4] + paramPtr);
1902 if (inRange(to, net_block[b][1], net_block[b][2]) && inRange(from, net_block[b][3], net_block[b][4])) {
1926 if (inRange(to, net_block[b][1], net_block[b][2]) && inRange(from, net_block[b][3], net_block[b][4])) {
1990 memset(&trainingHiddenBlock[net_block[b][3]+j][net_block[b][1]-ninputs-nhiddens], 1, net_block[b][2]*sizeof(char));
2442 float Evonet::trainLevembergMarquardt(QVector<float> trainingSet, QVector<float> desiredOutput, float maxError) {
2612 // if ( !( net_block[b][0]==0 && net_block[b][5]==1 && isHidden(net_block[b][1]) && inRange(m_freep,net_block[b][3],net_block[b][4]) ) )
2613 debug("\ttrainingHiddenBlock[%d][%d]: %d\n", net_block[b][1],m,trainingHiddenBlock[net_block[b][1]][m]);
2618 // Iterate over hidden neurons in the current block. The computation is clear knowing the algorithm.
2653 // std::cout<<"\n\ndet(j^Tj) :\n"<<(jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).determinant() << "\n";
2674 printf("nan determinant : %f\n", (jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).determinant());
2709 // new_weights = old_weights - (jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).inverse()*jacobian.transpose()*err;
2710 new_weights = old_weights - (jj + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).ldlt().solve(ww_err);
2712 // printf("\tdet(j^Tj) : %f -- norm(j^T e) : %f\n",(jacobian.transpose()*jacobian).determinant(), (jacobian.transpose()*err).norm() );
2752 float Evonet::trainLevembergMarquardtThroughTime(QVector<float> trainingSet, QVector<float> desiredOutput, int time, float maxError) {
2930 // if ( !( net_block[b][0]==0 && net_block[b][5]==1 && isHidden(net_block[b][1]) && inRange(m_freep,net_block[b][3],net_block[b][4]) ) )
2931 debug("\ttrainingHiddenBlock[%d][%d]: %d\n", net_block[b][1],m,trainingHiddenBlock[net_block[b][1]][m]);
2936 // Iterate over hidden neurons in the current block. The computation is clear knowing the algorithm.
2939 #warning The trainLevembergMarquardtThroughTime method requires that all the connections to a particular hidden block are in the same net_block.
2987 // std::cout<<"\n\ndet(j^Tj) :\n"<<(jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).determinant() << "\n";
3008 printf("nan determinant : %f\n", (jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).determinant());
3043 // new_weights = old_weights - (jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).inverse()*jacobian.transpose()*err;
3044 new_weights = old_weights - (jj + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).ldlt().solve(ww_err);
3046 // printf("\tdet(j^Tj) : %f -- norm(j^T e) : %f\n",(jacobian.transpose()*jacobian).determinant(), (jacobian.transpose()*err).norm() );
3114 // printf("setting weight to %d from %d in freep[%d] (%f)\n",j+net_block[b][1],i+net_block[b][3],wptr+i+j*net_block[b][4],freep[wptr+i+j*net_block[b][4]]);
3130 // printf("setting bias of %d in freep[%d] (%f)\n",i+net_block[b][1],biasptr-1,freep[biasptr-1]);
3177 // printf("setting weight to %d from %d in freep[%d] (%f)\n",j+net_block[b][1],i+net_block[b][3],wptr+i+j*net_block[b][4],freep[wptr+i+j*net_block[b][4]]);
3193 // printf("setting bias of %d in freep[%d] (%f)\n",i+net_block[b][1],biasptr-1,freep[biasptr-1]);
static const int MAXSTOREDACTIVATIONS
The maximum number of stored activation vectors.
Definition: evonet.h:133
void setNeckReflex()
Experiment specific function that set certain weights so to provide a given reflex behavior...
Definition: evonet.cpp:3212
void updateNet()
Update the state of the internal and motor neurons on the basis of: (i) the property of the neurons (...
Definition: evonet.cpp:834
void initWeightsInRange(float min, float max)
Initializes weights and biases randomly.
Definition: evonet.cpp:1247
float getFreeParameter(int i)
return the value of the ith parameter (normalized in the range [-wrange, wrange])
Definition: evonet.cpp:1075
int extractWeightsFromNet(Eigen::VectorXf &w)
Extract the connection weights and biases from the free parameters vector into a matrix to be used by...
Definition: evonet.cpp:1645
float getHidden(int h)
return the value of a hidden neuron (-999 if the specified id is out of range)
Definition: evonet.cpp:1061
static QString getString(ConfigurationParameters ¶ms, QString paramPath, QString def=QString())
void create_net_block(int inputNeuronType, int hiddenNeuronType, int outputNeuronType, bool recurrentHiddens, bool inputOutputConnections, bool recurrentOutputs, bool biasOnHidden, bool biasOnOutput)
Create the block structure that describe the architecture of the network (unless this structure is cr...
Definition: evonet.cpp:348
float getWrange()
return the value of wrange (which also determines the range in which all parameters are normalized) ...
Definition: evonet.cpp:1162
void printIO()
Print the activation state of sensory, internal, and motor neurons.
Definition: evonet.cpp:1132
float getWeight(int to, int from)
Return the weight that departs from neuron "from" and reaches neuron "to".
Definition: evonet.cpp:1893
static const double Infinity
float trainLevembergMarquardtThroughTime(QVector< float > trainingSet, QVector< float > desiredOutput, int time, float maxError)
Train the network through the Levenberg-Marquardt-through-time gradient descent algorithm.
Definition: evonet.cpp:2752
bool showTeachingInput()
Checks whether the teaching input has to be shown.
Definition: evonet.cpp:2031
int nselected
Number of parameters selected through the graphic interface Enable the user to modify the parameters ...
Definition: evonet.h:467
void prepareForTraining(QVector< float > &err_w)
Initialize variables required by back-propagation training mconnections: number of weights and biases...
Definition: evonet.cpp:1945
void copyPheParameters(int *pheGene)
transform floating point parameters normalized in the range [-wrange, wrange] into integer parameters in...
Definition: evonet.cpp:1120
float backPropStep(QVector< float > tInput, double rate)
Computes the neuron deltas for the Backpropagation algorithm.
Definition: evonet.cpp:2055
void setNetworkName(const QString &name)
Sets the name of this neural network.
Definition: evonet.cpp:169
FARSA_UTIL_TEMPLATE const T max(const T &t1, const U &t2)
void readOldPheLine(QStringList, float *, float *)
Assign to a free parameter and to the free parameter mutation rate the value extracted from a string ...
Definition: evonet.cpp:612
int load_net_blocks(const char *filename, int mode)
Load the description of the neural architecture from a .net or .phe file (see the create_net_block meth...
Definition: evonet.cpp:515
void injectHidden(int nh, float val)
set the activation state of a hidden neuron
Definition: evonet.cpp:1054
static void describe(QString type)
Add to Factory::typeDescriptions() the descriptions of all parameters and subgroups.
Definition: evonet.cpp:323
void hardwire()
Experiment specific function that set the weights for a reflex and set the weights that should not be...
Definition: evonet.cpp:1355
static void throwUserConfigError(QString paramName, QString paramValue, QString description)
bool neuronlesion[MAXN]
a vector that specifies lesioned and unlesioned neurons
Definition: evonet.h:423
float getTeachingInputEntry(int id)
Returns the value of the teaching input element in id position.
Definition: evonet.cpp:2045
bool updateNeuronMonitor
Set to true if labels or colors have to be updated in the neuron monitor.
Definition: evonet.h:431
void computeParameters()
Compute the required number of free parameters on the basis of: (i) the property of the neurons (stor...
Definition: evonet.cpp:777
float trainLevembergMarquardt(QVector< float > trainingSet, QVector< float > desiredOutput, float maxError)
Train the network through the Levenberg-Marquardt gradient descent algorithm.
Definition: evonet.cpp:2442
float backPropStep2(QVector< float > tInput, double rate)
Computes the neuron deltas for the Backpropagation algorithm but does not update the weights...
Definition: evonet.cpp:2194
static const int MAXN
Maximum number of neurons for a neural network of this type.
Definition: evonet.h:135
static bool getBool(ConfigurationParameters ¶ms, QString paramPath, bool def=false)
void activateMonitorUpdate()
the labels of the neurons displayed by the graphic widget
Definition: evonet.cpp:2036
Definition: evonetui.h:50
static void info(QString msg)
static void error(QString msg)
void setWeight(int to, int from, float w)
Set the weight that departs from neuron "from" and reaches neuron "to".
Definition: evonet.cpp:1918
void initWeightsNguyenWidrow(float min, float max)
Initializes weights and biases according to the Nguyen-Widrow initialization algorithm.
Definition: evonet.cpp:1301
QColor neurondcolor[MAXN]
the color used to display the activation state of each neuron in the neuron monitor widget ...
Definition: evonet.h:419
int neurondisplay[MAXN]
the vectors that specify for each neuron whether it should be displayed or not by the neuron monitor ...
Definition: evonet.h:410
bool startObjectParameters(QString groupPath, QString typeName, ParameterSettable *object)
float neuronlesionVal[MAXN]
the value to be assigned to the state of lesioned neurons
Definition: evonet.h:427
bool pheFileLoaded()
check whether a .phe file (with parameters description) has been loaded
Definition: evonet.cpp:1080
void getMutations(float *mut)
set the mutation vector of the genetic algorithm that determines how parameters are mutated ...
Definition: evonet.cpp:1112
static double getDouble(ConfigurationParameters ¶ms, QString paramPath, double def=0)
static Descriptor addTypeDescription(QString type, QString shortHelp, QString longHelp=QString(""))
void readNewPheLine(QStringList, float *, float *)
Assign to a free parameter and to the free parameter mutation rate the value extracted from a string ...
Definition: evonet.cpp:621
void configure(ConfigurationParameters ¶ms, QString prefix)
Configures the object using a ConfigurationParameters object.
Definition: evonet.cpp:179
float * getOldestStoredActivations()
Returns the oldest stored activation vector and removes it.
Definition: evonet.cpp:1223
const QString & getNetworkName() const
Returns the name of this neural network.
Definition: evonet.cpp:174
void setRanges(double weight, double bias, double gain)
set the range of connection weights, biases, and gains
Definition: evonet.cpp:1216
void save_net_blocks(const char *filename, int mode)
Save the description of the neural architecture into a .net or .phe file (see the create_net_block me...
Definition: evonet.cpp:658
int importWeightsFromVector(Eigen::VectorXf &w)
Copy back the weights and biases trained through the Levenberg Marquardt Algorithm into the free para...
Definition: evonet.cpp:1774
float getOutput(int out)
return the value of a motor neuron (-1 if the specified id is out of range)
Definition: evonet.cpp:1025
FARSA_UTIL_TEMPLATE const T min(const T &t1, const U &t2)
float computeMeanSquaredError(QVector< float > trainingSet, QVector< float > desiredOutput)
Update the network and compute and return the mean squared error.
Definition: evonet.cpp:1601
static int getInt(ConfigurationParameters ¶ms, QString paramPath, int def=0)
float * getBackPropWeightModification()
Returns the array of weight/bias updates after backpropagation.
Definition: evonet.cpp:2390
ParameterSettableUI * getUIManager()
Return a new instance of the EvonetUI.
Definition: evonet.cpp:343
float ** selectedp
pointer to the list pointer-to-parameters selected through the graphic interface Enable the user to m...
Definition: evonet.h:462
void createParameter(QString groupPath, QString parameter)
static void warning(QString msg)
void initBackPropFreep()
Initializes backpropfreep for applying backPropStep2 algorithm.
Definition: evonet.cpp:2351
void setParameters(const float *dt)
set the free parameters on the basis of a genotype string
Definition: evonet.cpp:1088
double neuronrange[MAXN][2]
the matrix that contain the variation range of neurons used by the neuron monitor graphic widget ...
Definition: evonet.h:415
void calculateBackPropagationError(QVector< float > tInput)
Computes the Backpropagation error (the backpropagation algorithm is not applied to the ANN) ...
Definition: evonet.cpp:2334
void save(ConfigurationParameters ¶ms, QString prefix)
Saves the actual status of parameters into the ConfigurationParameters object passed.
Definition: evonet.cpp:309
static const float DEFAULT_VALUE
DEFAULT_VALUE is used for do not assign values to mut and parameters.
Definition: evonet.h:137