evonet.cpp
1 /********************************************************************************
2  * FARSA Experiments Library *
3  * Copyright (C) 2007-2012 *
4  * Stefano Nolfi <stefano.nolfi@istc.cnr.it> *
5  * Onofrio Gigliotta <onofrio.gigliotta@istc.cnr.it> *
6  * Gianluca Massera <emmegian@yahoo.it> *
7  * Tomassino Ferrauto <tomassino.ferrauto@istc.cnr.it> *
8  * *
9  * This program is free software; you can redistribute it and/or modify *
10  * it under the terms of the GNU General Public License as published by *
11  * the Free Software Foundation; either version 2 of the License, or *
12  * (at your option) any later version. *
13  * *
14  * This program is distributed in the hope that it will be useful, *
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17  * GNU General Public License for more details. *
18  * *
19  * You should have received a copy of the GNU General Public License *
20  * along with this program; if not, write to the Free Software *
21  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *
22  ********************************************************************************/
23 
24 #include "evonet.h"
25 #include "logger.h"
26 #include "configurationhelper.h"
27 #include "evonetui.h"
28 #include "mathutils.h"
29 #include <QFileInfo>
30 #include <cstring>
#include <cmath> // for std::isinf and the other math functions used below
31 #include <limits>
32 #include <algorithm>
33 
34 #include <Eigen/Dense>
35 
36 #ifndef FARSA_MAC
37  #include <malloc.h> // DEBUG: to be substituted with new
38 #endif
39 
40 // All the stuff below is here to avoid warnings on Windows about the use of unsafe
41 // functions. This should only be a temporary workaround; the real solution is to stop
42 // using C string and file functions...
43 #if defined(_MSC_VER)
44  #pragma warning(push)
45  #pragma warning(disable:4996)
46 #endif
47 
48 namespace farsa {
49 
50 // MAXN and MAXSTOREDACTIVATIONS are declared in .h and their value is set there, but that is not a
51 // definition. This means that if you try to get the address of MAXN or MAXSTOREDACTIVATIONS you get
52 // a linker error (this also happens if you pass one of them to a function that takes a reference
53 // or const reference). However, we cannot initialize them here because these are used to define
54 // arrays, so their value must be present in the .h
55 // We must however not define them on Visual Studio 2010 and earlier (_MSC_VER <= 1600)...
56 #if !defined(_MSC_VER) || _MSC_VER > 1600
57 const int Evonet::MAXSTOREDACTIVATIONS;
58 const int Evonet::MAXN;
59 #endif
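// A hedged illustration (hypothetical caller, not part of this file): without
// the out-of-class definitions above, code such as
//   void f(const int& n);
//   f(Evonet::MAXN); // link error: binding a const reference needs an address
// fails at link time, which is exactly the problem described in the comment.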
60 const float Evonet::DEFAULT_VALUE = -99.0f;
61 
62 Evonet::Evonet()
63  : neuronsMonitorUploader(20, DataUploader<ActivationsToGui>::IncreaseQueueSize /*BlockUploader*/) // we can be ahead of GUI by at most 20 steps, then we are blocked
64  , m_evonetUI(NULL)
65 {
66  wrange = 5.0; // weight range
67  grange = 5.0; // gain range
68  brange = 5.0; // bias range
69  neuronlesions = 0;
70  freep = new float[1000];
71  backpropfreep = new float[1000];
72  copybackpropfreep = new float[1000];
73  teachingInput.fill(0.0, MAXN);
74  phep = NULL;
75  muts = NULL;
76  geneMaxValue = 255;
77  pheloaded = false;
78  selectedp = (float **) malloc(100 * sizeof(float *));
79  for (int i = 0; i < MAXN; i++) {
80  neuronl[i][0] = '\0';
81  neurondisplay[i] = 1;
82  neuronrange[i][0] = 0.0;
83  neuronrange[i][1] = 1.0;
84  neurondcolor[i] = Qt::black;
85  neuronlesion[i] = false;
86  neuronlesionVal[i] = 0.0;
87  }
88  net_nblocks = 0;
89 
90  nextStoredActivation = 0;
91  firstStoredActivation = 0;
92  updatescounter = 0;
93 
94  training = false;
95  updateNeuronMonitor = false;
96 }
97 
98 Evonet* Evonet::cloneNet() const
99 {
100  Evonet* e = new Evonet();
101  // Copy all fields (smart order)
102  e->nparameters = this->nparameters;
103  // Number of inputs, outputs, hiddens and neurons
104  e->nhiddens = this->nhiddens;
105  e->ninputs = this->ninputs;
106  e->nneurons = this->nneurons;
107  e->noutputs = this->noutputs;
108  // Ranges
109  e->brange = this->brange;
110  e->grange = this->grange;
111  e->wrange = this->wrange;
112  // Pointers
113  e->freep = new float[e->nparameters];
114  memcpy(e->freep, this->freep, e->nparameters * sizeof(float));
115  e->backpropfreep = new float[e->nparameters];
116  memcpy(e->backpropfreep, this->backpropfreep, e->nparameters * sizeof(float));
117  e->copybackpropfreep = new float[e->nparameters];
118  memcpy(e->copybackpropfreep, this->copybackpropfreep, e->nparameters * sizeof(float));
119  e->muts = new float[e->nparameters];
120  memcpy(e->muts, this->muts, e->nparameters * sizeof(float));
121  e->phep = new float[e->nparameters];
122  memcpy(e->phep, this->phep, e->nparameters * sizeof(float));
123  // Vectors and matrices
124  memcpy(&(e->act[0]), &(this->act[0]), MAXN * sizeof(float));
125  memcpy(&(e->input[0]), &(this->input[0]), MAXN * sizeof(float));
126  memcpy(&(e->net_block[0][0]), &(this->net_block[0][0]), MAXN * 6 * sizeof(int));
127  memcpy(&(e->netinput[0]), &(this->netinput[0]), MAXN * sizeof(float));
128  memcpy(&(e->neuronbias[0]), &(this->neuronbias[0]), MAXN * sizeof(int));
129  memcpy(&(e->neurondcolor[0]), &(this->neurondcolor[0]), MAXN * sizeof(QColor));
130  memcpy(&(e->neurondisplay[0]), &(this->neurondisplay[0]), MAXN * sizeof(int));
131  memcpy(&(e->neurongain[0]), &(this->neurongain[0]), MAXN * sizeof(int));
132  memcpy(&(e->neuronl[0][0]), &(this->neuronl[0][0]), MAXN * 10 * sizeof(char));
133  memcpy(&(e->neuronlesion[0]), &(this->neuronlesion[0]), MAXN * sizeof(bool));
134  memcpy(&(e->neuronlesionVal[0]), &(this->neuronlesionVal[0]), MAXN * sizeof(float));
135  memcpy(&(e->neuronrange[0][0]), &(this->neuronrange[0][0]), MAXN * 2 * sizeof(double));
136  memcpy(&(e->neurontype[0]), &(this->neurontype[0]), MAXN * sizeof(int));
137  memcpy(&(e->neuronxy[0][0]), &(this->neuronxy[0][0]), MAXN * 2 * sizeof(int));
138  memcpy(&(e->storedActivations[0][0]), &(this->storedActivations[0][0]), MAXSTOREDACTIVATIONS * MAXN * sizeof(float));
139  // Other variables
140  e->backproperror = this->backproperror;
141  e->drawnxmax = this->drawnxmax;
142  e->drawnymax = this->drawnymax;
143  e->err_weight_sum = this->err_weight_sum;
144  e->firstStoredActivation = this->firstStoredActivation;
145  e->geneMaxValue = this->geneMaxValue;
146  e->maxIterations = this->maxIterations;
147  e->nconnections = this->nconnections;
148  e->ndata = this->ndata;
149  e->net_nblocks = this->net_nblocks;
150  e->neuronlesions = this->neuronlesions;
151  e->nextStoredActivation = this->nextStoredActivation;
152  e->nparambias = this->nparambias;
153  e->nselected = this->nselected;
154  e->p = this->p;
155  e->pheloaded = this->pheloaded;
156  e->showTInput = this->showTInput;
157  e->training = this->training;
158  e->updateMonitor = this->updateMonitor;
160  e->updatescounter = this->updatescounter;
161  // QVectors, QStrings
162  e->err_weights = this->err_weights;
163  e->netFile = this->netFile;
164  e->networkName = this->networkName;
165  e->teachingInput = this->teachingInput;
166  return e;
167 }
168 
169 void Evonet::setNetworkName(const QString& name)
170 {
171  networkName = name;
172 }
173 
174 const QString& Evonet::getNetworkName() const
175 {
176  return networkName;
177 }
178 
179 void Evonet::configure(ConfigurationParameters& params, QString prefix) {
180  int nSensors = ConfigurationHelper::getInt( params, prefix+"nSensors", 0 );
181  int nHiddens = ConfigurationHelper::getInt( params, prefix+"nHiddens", 0 );
182  int nMotors = ConfigurationHelper::getInt( params, prefix+"nMotors", 0 );
183  QString netFile = ConfigurationHelper::getString( params, prefix+"netFile", "" );
184  // --- some parameters are in conflict
185  if ( netFile != "" && (nSensors+nHiddens+nMotors)>0 ) {
186  Logger::error( "Evonet - The information inside netFile overrides all other Evonet parameters" );
187  }
188  wrange = ConfigurationHelper::getDouble(params, prefix + "weightRange", 5.0); // the range of synaptic weights
189  grange = ConfigurationHelper::getDouble(params, prefix + "gainRange", 5.0); // the range of gains
190  brange = ConfigurationHelper::getDouble(params, prefix + "biasRange", 5.0); // the range of biases
191  showTInput = ConfigurationHelper::getBool(params, prefix + "showTeachingInput", false); // flag for showing teaching input
192 
193  updateMonitor = true; //by default it is always updated
194 
195  if ( netFile.isEmpty() ) {
196  // generate a neural network from parameters
197  ninputs = nSensors;
198  nhiddens = nHiddens;
199  noutputs = nMotors;
200  nneurons = ninputs + nhiddens + noutputs;
201  if (this->nneurons > MAXN) {
202  ConfigurationHelper::throwUserConfigError(prefix + "(nSensors + nHiddens + nMotors)", QString::number(nneurons), "Too many neurons: increase MAXN to support more than " + QString::number(MAXN) + " neurons");
203  }
204  int inputNeuronType = 0;
205  QString str = ConfigurationHelper::getString( params, prefix + "inputNeuronType", "no_delta" );
206  if ( str == QString("no_delta") ) {
207  inputNeuronType = 0;
208  } else if ( str == QString("with_delta") ) {
209  inputNeuronType = 1;
210  } else {
211  ConfigurationHelper::throwUserConfigError(prefix + "inputNeuronType", str, "Wrong value (use \"no_delta\" or \"with_delta\")");
212  }
213  int hiddenNeuronType = 0;
214  str = ConfigurationHelper::getString( params, prefix + "hiddenNeuronType", "logistic" );
215  if ( str == QString("logistic") ) {
216  hiddenNeuronType = 0;
217  } else if ( str == QString("logistic+delta") ) {
218  hiddenNeuronType = 1;
219  } else if ( str == QString("binary") ) {
220  hiddenNeuronType = 2;
221  } else if ( str == QString("logistic_0.2") ) {
222  hiddenNeuronType = 3;
223  } else {
224  ConfigurationHelper::throwUserConfigError(prefix + "hiddenNeuronType", str, "Wrong value (use \"logistic\", \"logistic+delta\", \"binary\" or \"logistic_0.2\")");
225  }
226  int outputNeuronType = 0;
227  str = ConfigurationHelper::getString( params, prefix + "outputNeuronType", "no_delta" );
228  if ( str == QString("no_delta") ) {
229  outputNeuronType = 0;
230  } else if ( str == QString("with_delta") ) {
231  outputNeuronType = 1;
232  } else {
233  ConfigurationHelper::throwUserConfigError(prefix + "outputNeuronType", str, "Wrong value (use \"no_delta\" or \"with_delta\")");
234  }
235  bool recurrentHiddens = ConfigurationHelper::getBool( params, prefix + "recurrentHiddens", false );
236  bool inputOutputConnections = ConfigurationHelper::getBool( params, prefix + "inputOutputConnections", false );
237  bool recurrentOutputs = ConfigurationHelper::getBool( params, prefix + "recurrentOutputs", false );
238  bool biasOnHidden = ConfigurationHelper::getBool( params, prefix + "biasOnHiddenNeurons", false );
239  bool biasOnOutput = ConfigurationHelper::getBool( params, prefix + "biasOnOutputNeurons", false );
240  create_net_block( inputNeuronType, hiddenNeuronType, outputNeuronType, recurrentHiddens, inputOutputConnections, recurrentOutputs, biasOnHidden, biasOnOutput );
241  } else {
242  // load the neural network from file; if the file doesn't exist, throw an exception
243  if (load_net_blocks(netFile.toLatin1().data(), 0) == 0) {
244  ConfigurationHelper::throwUserConfigError(prefix + "netFile", netFile, "Could not open the specified network configuration file");
245  }
246  }
247 
248  computeParameters();
249  // --- reallocate data on the basis of number of parameters
250  delete[] freep;
251  freep=new float[nparameters+1000]; // we allocate more space to handle network variations introduced by the user
252  for(int r=0;r<nparameters;r++)
253  freep[r]=0.0f;
254 
255  delete[] backpropfreep;
256  backpropfreep=new float[nparameters+1000]; // we allocate more space to handle network variations introduced by the user
257  for(int r=0;r<nparameters;r++)
258  backpropfreep[r]=0.0f;
259 
260  delete[] copybackpropfreep;
261  copybackpropfreep=new float[nparameters+1000]; // we allocate more space to handle network variations introduced by the user
262  for(int r=0;r<nparameters;r++)
263  copybackpropfreep[r]=0.0f;
264 
265  delete[] phep;
266  phep=new float[nparameters+1000]; // we allocate more space to handle network variations introduced by the user
267  for(int r=0;r<nparameters;r++)
268  phep[r]=DEFAULT_VALUE; // the default value corresponds to "don't care"
269 
270  delete[] muts;
271  muts=new float[nparameters+1000]; // we allocate more space to handle network variations introduced by the user
272  for(int r=0;r<nparameters;r++)
273  muts[r]=DEFAULT_VALUE; // the default value corresponds to "don't care"
274 
275  if ( !netFile.isEmpty() ) {
276  // Try to Load the file filename.phe if present in the current directory
277 
278 /* char cCurrentPath[300];
279 
280  if (!getcwd(cCurrentPath, sizeof(cCurrentPath)))
281  {
282  printf("error\n");
283  }
284  else {
285  cCurrentPath[sizeof(cCurrentPath) - 1] = '\0';
286 
287  printf ("The current working directory is %s\n", cCurrentPath);
288  }
289 */
290  QFileInfo fileNet( netFile );
291  QString filePhe = fileNet.baseName() + ".phe";
292  load_net_blocks(filePhe.toLatin1().data(), 1);
293  }
294 
295  //resetting net
296  resetNet();
297 
298  printBlocks();
299 
300  // we create the labels of the hidden neurons
301  for(int i = 0; i < nhiddens; i++) {
302  sprintf(neuronl[ninputs+i], "h%d", i);
303  neuronrange[ninputs+i][0] = 0.0;
304  neuronrange[ninputs+i][1] = 1.0;
305  neurondcolor[ninputs+i] = QColor(125,125,125);
306  }
307 }
308 
309 void Evonet::save(ConfigurationParameters& params, QString prefix) {
310  params.startObjectParameters( prefix, "Evonet", this );
311  if ( netFile.isEmpty() ) {
312  params.createParameter( prefix, "nSensors", QString::number(ninputs) );
313  params.createParameter( prefix, "nHidden", QString::number(nhiddens) );
314  params.createParameter( prefix, "nMotors", QString::number(noutputs) );
315  } else {
316  params.createParameter( prefix, "netFile", netFile );
317  }
318  params.createParameter( prefix, "weightRange", QString::number(wrange) );
319  params.createParameter( prefix, "gainRange", QString::number(grange) );
320  params.createParameter( prefix, "biasRange", QString::number(brange) );
321 }
322 
323 void Evonet::describe( QString type ) {
324  Descriptor d = addTypeDescription( type, "Neural Network imported from Evorobot" );
325  d.describeInt( "nSensors" ).limits( 1, MAXN ).help( "The number of sensor neurons" );
326  d.describeInt( "nHiddens" ).limits( 1, MAXN ).help( "The number of hidden neurons" );
327  d.describeInt( "nMotors" ).limits( 1, MAXN ).help( "The number of motor neurons" );
328  d.describeString( "netFile" ).help( "The .net file that defines the architecture to load. WARNING: when this parameter is specified, all other parameters are ignored" );
329  d.describeReal( "weightRange" ).def(5.0f).limits(1,+Infinity).help( "The synaptic weights of the neural network can only assume values in [-weightRange, +weightRange]" );
330  d.describeReal( "gainRange" ).def(5.0f).limits(0,+Infinity).help( "The gain of a neuron can only assume values in [0, +gainRange]" );
331  d.describeReal( "biasRange" ).def(5.0f).limits(0,+Infinity).help( "The bias of a neuron can only assume values in [-biasRange, +biasRange]" );
332  d.describeEnum( "inputNeuronType" ).def("no_delta").values( QStringList() << "no_delta" << "with_delta" ).help( "The type of input neurons when the network is auto generated");
333  d.describeEnum( "hiddenNeuronType" ).def("logistic").values( QStringList() << "logistic" << "logistic+delta" << "binary" << "logistic_0.2" ).help( "The type of hidden neurons when the network is auto generated");
334  d.describeEnum( "outputNeuronType" ).def("no_delta").values( QStringList() << "no_delta" << "with_delta" ).help( "The type of output neurons when the network is auto generated");
335  d.describeBool( "recurrentHiddens" ).def(false).help( "when true, generates a network with recurrent hidden neurons");
336  d.describeBool( "inputOutputConnections" ).def(false).help( "when true, generates a network with input-output connections in addition to input-hidden-output connections");
337  d.describeBool( "recurrentOutputs" ).def(false).help( "when true, generates a network with recurrent output neurons");
338  d.describeBool( "biasOnHiddenNeurons" ).def(false).help( "when true, generates a network whose hidden neurons have a bias");
339  d.describeBool( "biasOnOutputNeurons" ).def(false).help( "when true, generates a network whose output neurons have a bias");
340  d.describeBool( "showTeachingInput" ).def(false).help( "Whether the teaching input has to be shown in the UI");
341 }
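/*
 * A minimal configuration sketch for the parameters described above. The
 * group name and the exact file syntax are illustrative assumptions, they
 * depend on how the experiment's ConfigurationParameters file is organized:
 *
 *   [NET]
 *   type = Evonet
 *   nSensors = 8
 *   nHiddens = 4
 *   nMotors = 2
 *   hiddenNeuronType = logistic
 *   recurrentHiddens = false
 *   biasOnHiddenNeurons = true
 */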
342 
343 ParameterSettableUI* Evonet::getUIManager() {
344  m_evonetUI = new EvonetUI( this, &neuronsMonitorUploader );
345  return m_evonetUI;
346 }
347 
348 void Evonet::create_net_block( int inputNeuronType, int hiddenNeuronType, int outputNeuronType, bool recurrentHiddens, bool inputOutputConnections, bool recurrentOutputs, bool biasOnHidden, bool biasOnOutput )
349 {
350  int n;
351  int i;
352  int startx;
353  int dx;
354 
355  // setting the neuron types
356  for(i = 0; i < this->ninputs; i++) {
357  this->neurontype[i]= inputNeuronType;
358  neuronbias[i] = 0;
359  }
360  for(i = this->ninputs; i < (this->nneurons - this->noutputs); i++) {
361  this->neurontype[i]= hiddenNeuronType;
362  neuronbias[i] = (biasOnHidden) ? 1 : 0;
363  }
364  for(i = (this->nneurons - this->noutputs); i < this->nneurons; i++) {
365  this->neurontype[i]= outputNeuronType;
366  neuronbias[i] = (biasOnOutput) ? 1 : 0;
367  }
368 
369  // gain
370  for(i=0; i < this->nneurons; i++) {
371  this->neurongain[i]= 0;
372  }
373 
374  this->net_nblocks = 0;
375  // input update block
376  this->net_block[this->net_nblocks][0] = 1;
377  this->net_block[this->net_nblocks][1] = 0;
378  this->net_block[this->net_nblocks][2] = this->ninputs;
379  this->net_block[this->net_nblocks][3] = 0;
380  this->net_block[this->net_nblocks][4] = 0;
381  this->net_block[this->net_nblocks][5] = 0;
382  this->net_nblocks++;
383 
384  // input-hidden connections
385  if (this->nhiddens > 0) {
386  this->net_block[this->net_nblocks][0] = 0;
387  this->net_block[this->net_nblocks][1] = this->ninputs;
388  this->net_block[this->net_nblocks][2] = this->nhiddens;
389  this->net_block[this->net_nblocks][3] = 0;
390  this->net_block[this->net_nblocks][4] = this->ninputs;
391  this->net_block[this->net_nblocks][5] = 0;
392  this->net_nblocks++;
393  }
394 
395  // hidden-hidden connections
396  if (recurrentHiddens) {
397  this->net_block[this->net_nblocks][0] = 0;
398  this->net_block[this->net_nblocks][1] = this->ninputs;
399  this->net_block[this->net_nblocks][2] = this->nhiddens;
400  this->net_block[this->net_nblocks][3] = this->ninputs;
401  this->net_block[this->net_nblocks][4] = this->nhiddens;
402  this->net_block[this->net_nblocks][5] = 0;
403  this->net_nblocks++;
404  }
405 
406  // hidden update block
407  if (this->nhiddens > 0) {
408  this->net_block[this->net_nblocks][0] = 1;
409  this->net_block[this->net_nblocks][1] = this->ninputs;
410  this->net_block[this->net_nblocks][2] = this->nhiddens;
411  this->net_block[this->net_nblocks][3] = 0;
412  this->net_block[this->net_nblocks][4] = 0;
413  this->net_block[this->net_nblocks][5] = 0;
414  this->net_nblocks++;
415  }
416 
417  // input-output connections
418  if (this->nhiddens == 0 || inputOutputConnections) {
419  this->net_block[this->net_nblocks][0] = 0;
420  this->net_block[this->net_nblocks][1] = this->ninputs + this->nhiddens;
421  this->net_block[this->net_nblocks][2] = this->noutputs;
422  this->net_block[this->net_nblocks][3] = 0;
423  this->net_block[this->net_nblocks][4] = this->ninputs;
424  this->net_block[this->net_nblocks][5] = 0;
425  this->net_nblocks++;
426  }
427 
428  // hidden-output connections
429  if (this->nhiddens > 0) {
430  this->net_block[net_nblocks][0] = 0;
431  this->net_block[net_nblocks][1] = this->ninputs + this->nhiddens;
432  this->net_block[net_nblocks][2] = this->noutputs;
433  this->net_block[net_nblocks][3] = this->ninputs;
434  this->net_block[net_nblocks][4] = this->nhiddens;
435  this->net_block[this->net_nblocks][5] = 0;
436  this->net_nblocks++;
437  }
438 
439  // output-output connections
440  if (recurrentOutputs) {
441  this->net_block[this->net_nblocks][0] = 0;
442  this->net_block[this->net_nblocks][1] = this->ninputs + this->nhiddens;
443  this->net_block[this->net_nblocks][2] = this->noutputs;
444  this->net_block[this->net_nblocks][3] = this->ninputs + this->nhiddens;
445  this->net_block[this->net_nblocks][4] = this->noutputs;
446  this->net_block[this->net_nblocks][5] = 0;
447  this->net_nblocks++;
448  }
449 
450  // output update block
451  this->net_block[this->net_nblocks][0] = 1;
452  this->net_block[this->net_nblocks][1] = this->ninputs + this->nhiddens;
453  this->net_block[this->net_nblocks][2] = this->noutputs;
454  this->net_block[this->net_nblocks][3] = 0;
455  this->net_block[this->net_nblocks][4] = 0;
456  this->net_block[this->net_nblocks][5] = 0;
457  this->net_nblocks++;
458 
459  // cartesian xy coordinate for sensory neurons for display (y=400)
460  n = 0;
461  dx = 30; // horizontal spacing between displayed neurons (was 25)
462  if (this->ninputs > this->noutputs) {
463  startx = 50;
464  } else {
465  startx = ((this->noutputs - this->ninputs) / 2) * dx + 50;
466  }
467  for(i = 0; i < this->ninputs; i++, n++) {
468  this->neuronxy[n][0] = (i * dx) + startx;
469  this->neuronxy[n][1] = 400;
470  }
471 
472  // cartesian xy coordinate for internal neurons for display (y=225)
473  startx = this->ninputs * dx;
474  for(i=0; i < (this->nneurons - (this->ninputs + this->noutputs)); i++, n++) {
475  this->neuronxy[n][0] = startx + (i * dx);
476  this->neuronxy[n][1] = 225;
477  }
478 
479  // cartesian xy coordinate for motor neurons for display (y=50)
480  if (this->ninputs > this->noutputs) {
481  startx = ((this->ninputs - this->noutputs) / 2) * dx + 50;
482  } else {
483  startx = 50;
484  }
485  for(i=0; i < this->noutputs; i++, n++) {
486  this->neuronxy[n][0] = startx + (i * dx);
487  this->neuronxy[n][1] = 50;
488  }
489 
490  // set neurons whose activation should be displayed
491  for(i=0; i < this->nneurons; i++) {
492  this->neurondisplay[i] = 1;
493  }
494 
495  // calculate the height and width necessary to display all created neurons (drawnymax, drawnxmax)
496  drawnymax = 400 + 30;
497  for(i = 0, drawnxmax = 0; i < nneurons; i++) {
498  if (neuronxy[i][0] > drawnxmax) {
499  drawnxmax = neuronxy[i][0];
500  }
501  }
502  drawnxmax += 60;
503 
504  // compute the number of parameters
505  computeParameters();
506 
507  //i = this->ninputs;
508  //if (this->ninputs > i)
509  // i = this->noutputs;
510  //if ((this->nneurons - this->noutputs) > i)
511  // i = (this->nneurons - this->noutputs);
512  //drawnxmax = (i * dx) + dx + 30;
513 }
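/*
 * Layout of a net_block row, as filled in above and parsed from .net files:
 *   [0] block type: 0 = connections, 1 = update, 2 = gain, 3 = modulated gain
 *   [1] first receiving neuron    [2] number of receiving neurons
 *   [3] first sending neuron      [4] number of sending neurons
 *   [5] whether the block is subject to training (see prepareForTraining())
 * For example, in a 2-3-1 feed-forward net the input-hidden connections are
 * the row {0, 2, 3, 0, 2, 0}: hidden neurons 2..4 receive from inputs 0..1.
 */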
514 
515 int Evonet::load_net_blocks(const char *filename, int mode)
516 {
517 
518  FILE *fp;
519  int b;
520  int n;
521  int i;
522  float *ph;
523  float *mu;
524  float *p;
525  int np;
526  const int bufferSize = 128;
527  char cbuffer[bufferSize];
528 
529  if ((fp = fopen(filename,"r")) != NULL)
530  {
531  fscanf(fp,"ARCHITECTURE\n");
532  fscanf(fp,"nneurons %d\n", &nneurons);
533  fscanf(fp,"nsensors %d\n", &ninputs);
534  fscanf(fp,"nmotors %d\n", &noutputs);
535  if (nneurons > MAXN)
536  Logger::error( "Evonet - increase MAXN to support more than "+QString::number(MAXN)+" neurons" );
537  nhiddens = nneurons - (ninputs + noutputs);
538  fscanf(fp,"nblocks %d\n", &net_nblocks);
539  for (b=0; b < net_nblocks; b++)
540  {
541  fscanf(fp,"%d %d %d %d %d %d", &net_block[b][0],&net_block[b][1],&net_block[b][2],&net_block[b][3],&net_block[b][4], &net_block[b][5]);
542  if (net_block[b][0] == 0)
543  fscanf(fp," // connections block\n");
544  if (net_block[b][0] == 1)
545  fscanf(fp," // block to be updated\n");
546  if (net_block[b][0] == 2)
547  fscanf(fp," // gain block\n");
548  if (net_block[b][0] == 3)
549  fscanf(fp," // modulated gain block\n");
550  }
551 
552  fscanf(fp,"neurons bias, delta, gain, xy position, display\n");
553  drawnxmax = 0;
554  drawnymax = 0;
555  for(n=0; n < nneurons; n++)
556  {
557  fscanf(fp,"%d %d %d %d %d %d\n", &neuronbias[n], &neurontype[n], &neurongain[n], &neuronxy[n][0], &neuronxy[n][1], &neurondisplay[n]);
558  if(drawnxmax < neuronxy[n][0])
559  drawnxmax = neuronxy[n][0];
560  if(drawnymax < neuronxy[n][1])
561  drawnymax = neuronxy[n][1];
562  }
563  drawnxmax += 30;
564  drawnymax += 30;
565 
566  if (mode == 1)
567  {
568  fscanf(fp,"FREE PARAMETERS %d\n", &np);
569  if (nparameters != np) {
570  Logger::error(QString("ERROR: parameters defined are %1 while %2 contains %3 parameters").arg(nparameters).arg(filename).arg(np));
571  }
572  i = 0;
573  ph = phep;
574  mu = muts;
575  p = freep;
576 
577  while (fgets(cbuffer,bufferSize,fp) != NULL && i < np)
578  {
579  //read values from line
580  QString line = cbuffer;
581  QStringList lineContent = line.split(QRegExp("\\s+"), QString::SkipEmptyParts);
582 
583  bool floatOnSecondPlace = false;
584  if (lineContent.size() > 1) lineContent[1].toFloat(&floatOnSecondPlace);
585 
586  if(lineContent.contains("*") || floatOnSecondPlace)
587  readNewPheLine(lineContent, ph, mu);
588  else
589  readOldPheLine(lineContent, ph, mu);
590 
591  *p = *ph;
592 
593  i++;
594  mu++;
595  ph++;
596  p++;
597  }
598  pheloaded = true;
599  }
600  fclose(fp);
601 
602  Logger::info( "Evonet - loaded file " + QString(filename) );
603  return(1);
604  }
605  else
606  {
607  Logger::warning( "Evonet - File " + QString(filename) + " not found" );
608  return(0);
609  }
610 }
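/*
 * Sketch of the .net file format parsed above (the values are illustrative):
 *
 *   ARCHITECTURE
 *   nneurons 6
 *   nsensors 2
 *   nmotors 1
 *   nblocks 2
 *   0 2 3 0 2 0 // connections block
 *   1 2 3 0 0 0 // block to be updated
 *   neurons bias, delta, gain, xy position, display
 *   0 0 0 50 400 1
 *   ... (one line per neuron)
 *
 * When mode == 1 a trailing "FREE PARAMETERS n" section is read as well,
 * holding one "value mutation" pair per parameter ("*" meaning don't care).
 */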
611 
612 void Evonet::readOldPheLine(QStringList line, float* par, float* mut)
613 {
614  *par = line[0].toFloat();
615 
616  if(*par != DEFAULT_VALUE) { //no mutations
617  *mut = 0;
618  }
619 }
620 
621 void Evonet::readNewPheLine(QStringList line, float* par, float* mut)
622 {
623  if(line[0] == "*") {
624  *par = DEFAULT_VALUE; //start at random
625  } else {
626  //error handling
627 
628 /* char *tmp = line[0].toLatin1().data();
629  printf("read : %s\n",line[0].toLatin1().data());
630  printf("tmp : %s\n",tmp);
631 
632  for (int i = 0; i<line[0].length(); i++) {
633  if (tmp[i]==',') {
634  tmp[i] = '.';
635 
636  }
637  }
638 */
639 // *par = strtof(tmp, NULL);
640 
641 
642 
643 // sscanf(tmp, "%f", par);
644  *par = line[0].toFloat();
645  }
646 
647 
648  if(line[1] == "*") {
649  *mut = DEFAULT_VALUE;
650  } else {
651  *mut = line[1].toFloat();
652  }
653 }
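// Illustrative .phe lines accepted by the two readers above: the new format,
// e.g. "0.35 0.01 bias h0", carries a value and a mutation rate ("* *" leaves
// both to be randomized); the old format carries only the value in column one.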
654 
655 /*
656  * Saves the architecture and also the parameters (when mode == 1)
657  */
658 void Evonet::save_net_blocks(const char *filename, int mode)
659 {
660  FILE *fp;
661  int b;
662  int n;
663  int i;
664  int t;
665 
666  char* default_string = (char*)"*\t\t"; // cast: string literals are const in C++
667  char **p = new char*[freeParameters()];
668  char **mu = new char*[freeParameters()];
669  for(int h=0; h<freeParameters(); h++) {
670  // allocate a buffer only when a value is actually printed, so that
671  // assigning default_string below does not leak the allocation
672  if(muts[h] == DEFAULT_VALUE) {
673  mu[h] = default_string;
674  } else {
675  mu[h] = new char[50];
676  sprintf(mu[h], "%f", muts[h]);
677  }
678  if(freep[h] == DEFAULT_VALUE) {
679  p[h] = default_string;
680  } else {
681  p[h] = new char[50];
682  sprintf(p[h], "%f", freep[h]);
683  }
684  }
685 
686  if ((fp = fopen(filename,"w")) != NULL) {
687  fprintf(fp,"ARCHITECTURE\n");
688  fprintf(fp,"nneurons %d\n", nneurons);
689  fprintf(fp,"nsensors %d\n", ninputs);
690  fprintf(fp,"nmotors %d\n", noutputs);
691  fprintf(fp,"nblocks %d\n", net_nblocks);
692  for (b = 0; b < net_nblocks; b++) {
693  fprintf(fp,"%d %d %d %d %d %d", net_block[b][0],net_block[b][1],net_block[b][2],net_block[b][3],net_block[b][4],net_block[b][5]);
694  if (net_block[b][0] == 0) {
695  fprintf(fp," // connections block\n");
696  } else if (net_block[b][0] == 1) {
697  fprintf(fp," // block to be updated\n");
698  } else if (net_block[b][0] == 2) {
699  fprintf(fp," // gain block\n");
700  } else if (net_block[b][0] == 3) {
701  fprintf(fp," // modulated gain block\n");
702  }
703  }
704  fprintf(fp,"neurons bias, delta, gain, xy position, display\n");
705  for(n = 0; n < nneurons; n++) {
706  fprintf(fp,"%d %d %d %d %d %d\n", neuronbias[n], neurontype[n], neurongain[n], neuronxy[n][0], neuronxy[n][1], neurondisplay[n]);
707  }
708 
710  if (mode == 1) {
711  fprintf(fp,"FREE PARAMETERS %d\n", nparameters);
712  for(i = 0; i < nneurons; i++) {
713  if (neurongain[i] == 1) {
714  fprintf(fp,"%s \t %s \tgain %s\n",*p, *mu, neuronl[i]);
715  p++;
716  mu++;
717  }
718  }
719  for(i=0; i<nneurons; i++) {
720  if (neuronbias[i] == 1) {
721  fprintf(fp,"%s \t %s \tbias %s\n",*p, *mu, neuronl[i]);
722  p++;
723  mu++;
724  }
725  }
726  for (b=0; b < net_nblocks; b++) {
727  if (net_block[b][0] == 0) {
728  for(t=net_block[b][1]; t < net_block[b][1] + net_block[b][2];t++) {
729  for(i=net_block[b][3]; i < net_block[b][3] + net_block[b][4];i++) {
730  fprintf(fp,"%s \t %s \tweight %s from %s\n",*p, *mu, neuronl[t], neuronl[i]);
731  p++;
732  mu++;
733  }
734  }
735  } else if (net_block[b][0] == 1) {
736  for(t=net_block[b][1]; t < (net_block[b][1] + net_block[b][2]); t++) {
737  if (neurontype[t] == 1) {
738  float timeC = 0;
739  if(*p != default_string) {
740  timeC = atof(*p);
741  timeC = fabs(timeC)/wrange; //(timeC + wrange)/(wrange*2);
742  }
743 
744  fprintf(fp,"%s \t %s \ttimeconstant %s (%f)\n", *p, *mu, neuronl[t], timeC);
745  p++;
746  mu++;
747  }
748  }
749  }
750  }
751  }
752  fprintf(fp,"END\n");
753 
754  Logger::info( "Evonet - controller saved on file " + QString(filename) );
755  } else {
756  Logger::error( "Evonet - unable to create the file " + QString(filename) );
757  }
758  fclose(fp);
759 }
760 
761 /*
762  * standard logistic
763  */
764 float Evonet::logistic(float f)
765 {
766  return((float) (1.0 / (1.0 + exp(0.0 - f))));
767 }
768 
769  float Evonet::tansig(float f) {
770 
771  return 2.0/(1.0+exp(-2.0*f))-1.0;
772  }
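// Mathematical note: 2/(1+exp(-2f))-1 is algebraically equal to tanh(f),
// so tansig() is just the hyperbolic tangent written in logistic form.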
773 
774 /*
775  * compute the number of free parameters
776  */
777 void Evonet::computeParameters()
778 {
779  int i;
780  int t;
781  int b;
782  int updated[MAXN];
783  int ng;
784  int nwarnings;
785 
786  ng = 0;
787  for(i=0;i < nneurons;i++) {
788  updated[i] = 0;
789  }
790  // gain
791  for(i=0;i < nneurons;i++) {
792  if (neurongain[i] == 1) {
793  ng++;
794  }
795  }
796  // biases
797  for(i=0;i < nneurons;i++) {
798  if (neuronbias[i] == 1) {
799  ng++;
800  }
801  }
802  // timeconstants
803  for(i=0;i < nneurons;i++) {
804  if (neurontype[i] == 1) {
805  ng++;
806  }
807  }
808  // blocks
809  for (b=0; b < net_nblocks; b++) {
810  // connection block: one free parameter per receiving/sending pair
811  if (net_block[b][0] == 0) {
812  ng += net_block[b][2] * net_block[b][4];
813  }
814  // update block: mark updated neurons for the sanity checks below
815  if (net_block[b][0] == 1) {
816  for(t=net_block[b][1]; t < net_block[b][1] + net_block[b][2]; t++)
817  updated[t] += 1;
818  }
819  }
820  nwarnings = 0;
821  for(i=0;i < nneurons;i++) {
822  if (updated[i] < 1 && nwarnings == 0) {
823  Logger::warning( "Evonet - neuron " + QString::number(i) + " will never be activated according to the current architecture" );
824  nwarnings++;
825  }
826  if (updated[i] > 1 && nwarnings == 0) {
827  Logger::warning( "Evonet - neuron " + QString::number(i) + " will be activated more than once according to the current architecture" );
828  nwarnings++;
829  }
830  }
831  nparameters=ng; // number of parameters
832 }
833 
834 void Evonet::updateNet()
835 {
836  int i;
837  int t;
838  int b;
839  float *p;
840  float delta;
841  float netinput[MAXN];
842  float gain[MAXN];
843 
844  p = freep;
845  //nl = neuronlesion;
846 
847  // gain
848  for(i=0;i < nneurons;i++) {
849  if (neurongain[i] == 1) {
850  gain[i] = (float) (fabs((double) *p) / wrange) * grange;
851  p++;
852  } else {
853  gain[i] = 1.0f;
854  }
855  }
856  // biases
857 
858 /* printf("weights: ");
859  printWeights();
860  printf("\n");
861 */
862  for(i=0;i < nneurons;i++) {
863  if (neuronbias[i] == 1) {
864 // printf("netinput[%d]= [%ld] %f/%f*%f = %f*%f = %f",i,p-freep, *p,wrange,brange,((double)*p/wrange),brange,((double)*p/wrange)*brange);
865  netinput[i] = ((double)*p/wrange)*brange;
866  p++;
867  } else {
868  netinput[i] = 0.0f;
869  }
870  }
871 
872  // blocks
873  for (b=0; b < net_nblocks; b++) {
874  // connection block
875  if (net_block[b][0] == 0) {
876  for(t=net_block[b][1]; t < net_block[b][1] + net_block[b][2];t++) {
877  for(i=net_block[b][3]; i < net_block[b][3] + net_block[b][4];i++) {
878  netinput[t] += act[i] * gain[i] * *p;
879 // printf("netinput[%d] += act[%d] * gain[%d] * %f += %f * %f * %f += %f = %f\n",t,i,i,*p,act[i],gain[i], *p,act[i] * gain[i] * *p, netinput[t] );
880  p++;
881  }
882  }
883  }
884  // gain block (gain of neuron a-b set equal to gain of neuron a)
885  if (net_block[b][0] == 2) {
886  for(t=net_block[b][1]; t < net_block[b][1] + net_block[b][2];t++) {
887  gain[t] = gain[net_block[b][1]];
888  }
889  }
890  // gain block (gain of neuron a-b set equal to act[c])
891  if (net_block[b][0] == 3) {
892  for(t=net_block[b][1]; t < net_block[b][1] + net_block[b][2];t++) {
893  gain[t] = act[net_block[b][3]];
894  }
895  }
896  // update block
897  if (net_block[b][0] == 1) {
898  for(t=net_block[b][1]; t < (net_block[b][1] + net_block[b][2]); t++) {
899  if (t < ninputs) {
900  switch(neurontype[t]) {
901  case 0: // simple relay units
902  act[t] = input[t];
903  break;
904  case 1: // delta neurons
905  delta = (float) (fabs((double) *p) / wrange);
906  p++;
907  act[t] = (act[t] * delta) + (input[t] * (1.0f - delta));
908  // Check whether activation is within range [0,1]
909  if (act[t] < 0.0)
910  {
911  act[t] = 0.0;
912  }
913  if (act[t] > 1.0)
914  {
915  act[t] = 1.0;
916  }
917  break;
918  }
919  if(neuronlesions > 0 && neuronlesion[t]) {
920  act[t]= (float)neuronlesionVal[t];
921  }
922  } else {
923  switch(neurontype[t]) {
924  case 0: // simple logistic
925  default:
926  act[t] = logistic(netinput[t]);
927  delta = 0.0;
928  break;
929  case 1: // delta neurons
930  delta = (float) (fabs((double) *p) / wrange);
931  p++;
932  act[t] = (act[t] * delta) + (logistic(netinput[t]) * (1.0f - delta));
933  // Check whether activation is within range [0,1]
934  if (act[t] < 0.0)
935  {
936  act[t] = 0.0;
937  }
938  if (act[t] > 1.0)
939  {
940  act[t] = 1.0;
941  }
942  break;
943  case 2: // binary neurons
944  if (netinput[t] >= 0.0) {
945  act[t] = 1.0;
946  } else {
947  act[t] = 0.0;
948  }
949  break;
950  case 3: // logistic_0.2 neurons (logistic with slope 0.2)
951  act[t] = logistic(netinput[t]*0.2f);
952  delta = 0.0;
953  break;
954  }
955  if(neuronlesions > 0 && neuronlesion[t]) {
956  act[t]= (float)neuronlesionVal[t];
957  }
958  }
959  }
960  }
961  }
962 
963  // Storing the current activations
964  memcpy(storedActivations[nextStoredActivation], act, nneurons * sizeof(float));
965  nextStoredActivation = (nextStoredActivation + 1) % MAXSTOREDACTIVATIONS;
966  if (firstStoredActivation == nextStoredActivation) {
967  // We have filled the circular buffer, discarding the oldest activation
968  firstStoredActivation = (firstStoredActivation + 1) % MAXSTOREDACTIVATIONS;
969  }
970 
971  // increment the counter
972  updatescounter++;
973 
974  emit evonetUpdated();
975 
976  // If a downloader is associated with the neuronsMonitorUploader, uploading activations
977  if (neuronsMonitorUploader.downloaderPresent() && updateMonitor) {
978  // This call can return NULL if GUI is too slow
979  DatumToUpload<ActivationsToGui> d(neuronsMonitorUploader);
980 
981  d->activations = true;
982 
983  // Reserving the correct number of elements, for efficiency reasons
984  d->data.reserve(nneurons);
985  d->data.resize(nneurons);
986 
987  // Copying data
988  std::copy(act, &(act[nneurons]), d->data.begin());
989 
990  // Adding the current step
991  d->updatesCounter = updatescounter;
992 
993  if (updateNeuronMonitor) {
994  updateNeuronMonitor = false;
995 
996  // we also send labels and colors and tell the gui to update them
997  d->updateLabelAndColors = true;
998 
999  // Copying labels
1000  d->neuronl.resize(nneurons);
1001  for (int i = 0; i < nneurons; ++i) {
1002  d->neuronl[i] = neuronl[i];
1003  }
1004 
1005  // Copying colors
1006  d->neurondcolor.resize(nneurons);
1007  for (int i = 0; i < nneurons; ++i) {
1008  d->neurondcolor[i] = neurondcolor[i];
1009  }
1010  } else {
1011  d->updateLabelAndColors = false;
1012  }
1013  }
1014 }
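/*
 * Typical usage sketch of the update cycle (illustrative; readSensor() is a
 * hypothetical helper, not part of this library):
 *
 *   for (int s = 0; s < net->getNoInputs(); s++)
 *       net->setInput(s, readSensor(s));
 *   net->updateNet();
 *   float motor = net->getOutput(0);
 */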
1015 
1016 int Evonet::setInput(int inp, float value)
1017 {
1018  if (inp>=ninputs || inp<0) {
1019  return -1; // exceeding sensor range
1020  }
1021  input[inp]=value;
1022  return 0;
1023 }
1024 
1025 float Evonet::getOutput(int out)
1026 {
1027  if(out>=noutputs || out<0) {
1028  return -1; // exceeding motor range
1029  }
1030  return act[ninputs+nhiddens+out];
1031 }
1032 
1033 float Evonet::getInput(int in)
1034 {
1035  return this->input[in];
1036 }
1037 
1038 float Evonet::getNeuron(int in)
1039 {
1040  return act[in];
1041 }
1042 
1043 void Evonet::resetNet()
1044 {
1045  int i;
1046  for (i = 0; i < MAXN; i++) {
1047  act[i]=0.0;
1048  netinput[i]=0.0;
1049  input[i]=0.0;
1050  }
1051  updatescounter = 0;
1052 }
1053 
1054 void Evonet::injectHidden(int nh, float val)
1055 {
1056  if(nh<nhiddens) {
1057  act[this->ninputs+nh] = val;
1058  }
1059 }
1060 
1061 float Evonet::getHidden(int h)
1062 {
1063  if(h<nhiddens && h>=0) {
1064  return act[this->ninputs+h];
1065  } else {
1066  return -999;
1067  }
1068 }
1069 
1070 int Evonet::freeParameters()
1071 {
1072  return this->nparameters;
1073 }
1074 
1075 float Evonet::getFreeParameter(int i)
1076 {
1077  return freep[i];
1078 }
1079 
1080 bool Evonet::pheFileLoaded()
1081 {
1082  return pheloaded;
1083 }
1084 
1085 /*
1086  * Copy parameters from genotype
1087  */
1088 void Evonet::setParameters(const float *dt)
1089 {
1090  int i;
1091  float *p;
1092 
1093  p = freep;
1094  for (i=0; i<freeParameters(); i++, p++) {
1095  *p = dt[i];
1096  }
1097  emit evonetUpdated();
1098 }
1099 
1100 void Evonet::setParameters(const int *dt)
1101 {
1102  int i;
1103  float *p;
1104 
1105  p = freep;
1106  for (i=0; i<freeParameters(); i++, p++) {
1107  *p = wrange - ((float)dt[i]/geneMaxValue)*wrange*2;
1108  }
1109  emit evonetUpdated();
1110 }
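// Worked example of the mapping above: with geneMaxValue = 255 and
// wrange = 5.0, gene 0 maps to +5.0, gene 255 to -5.0 and gene 128 to about
// -0.02, i.e. the integer genotype spans [+wrange, -wrange] linearly.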
1111 
1112 void Evonet::getMutations(float* GAmut)
1113 {
1114  //copy mutation vector
1115  for(int i=0; i<freeParameters(); i++) {
1116  GAmut[i] = muts[i];
1117  }
1118 }
1119 
1120 void Evonet::copyPheParameters(int* pheGene)
1121 {
1122  for(int i=0; i<freeParameters(); i++)
1123  {
1124  if(phep[i] == DEFAULT_VALUE) {
1125  pheGene[i] = DEFAULT_VALUE;
1126  } else {
1127  pheGene[i] = (int)((wrange - phep[i])*geneMaxValue/(2*wrange));
1128  }
1129  }
1130 }
1131 
1132 void Evonet::printIO()
1133 {
1134  QString output;
1135 
1136  output = "In: ";
1137  for (int in = 0; in < this->ninputs; in++) {
1138  output += QString("%1 ").arg(this->input[in], 0, 'f', 10);
1139  }
1140  output += "Hid: ";
1141  for (int hi = this->ninputs; hi < (this->nneurons - this->noutputs); hi++) {
1142  output += QString("%1 ").arg(this->act[hi], 0, 'f', 10);
1143  }
1144  output += "Out: ";
1145  for (int out = 0; out < this->noutputs; out++) {
1146  output += QString("%1 ").arg(this->act[this->ninputs+this->nhiddens+out], 0, 'f', 10);
1147  }
1148 
1149  Logger::info(output);
1150 
1151 }
1152 
1153 int Evonet::getParamBias(int nbias)
1154 {
1155  int pb=-999; // if it remains -999, nbias is out of range
1156  if (nbias<nparambias && nbias>-1) {
1157  pb=(int) freep[nparambias+nbias];
1158  }
1159  return pb;
1160 }
1161 
1162 float Evonet::getWrange()
1163 {
1164  return wrange;
1165 }
1166 
1167 float Evonet::getBrange()
1168 {
1169  return brange;
1170 }
1171 
1172 float Evonet::getGrange()
1173 {
1174  return grange;
1175 }
1176 
1177 
1178 void Evonet::printBlocks()
1179 {
1180  Logger::info("Evonet - ninputs " + QString::number(this->ninputs));
1181  Logger::info("Evonet - nhiddens " + QString::number(this->nhiddens));
1182  Logger::info("Evonet - noutputs " + QString::number(this->noutputs));
1183  Logger::info("Evonet - nneurons " + QString::number(this->nneurons));
1184 
1185  for(int i=0;i<this->net_nblocks;i++) {
1186  Logger::info( QString( "Evonet Block - %1 | %2 - %3 -> %4 - %5 | %6" )
1187  .arg(net_block[i][0])
1188  .arg(net_block[i][1])
1189  .arg(net_block[i][2])
1190  .arg(net_block[i][3])
1191  .arg(net_block[i][4])
1192  .arg(net_block[i][5]));
1193  }
1194 }
1195 
1196 int Evonet::getNoInputs()
1197 {
1198  return ninputs;
1199 }
1200 
1201 int Evonet::getNoHiddens()
1202 {
1203  return nhiddens;
1204 }
1205 
1206 int Evonet::getNoOutputs()
1207 {
1208  return noutputs;
1209 }
1210 
1211 int Evonet::getNoNeurons()
1212 {
1213  return nneurons;
1214 }
1215 
1216 void Evonet::setRanges(double weight, double bias, double gain)
1217 {
1218  wrange=weight;
1219  brange=bias;
1220  grange=gain;
1221 }
1222 
1223 float* Evonet::getOldestStoredActivations()
1224 {
1225  if (firstStoredActivation == nextStoredActivation) {
1226  return NULL;
1227  }
1228 
1229  const int ret = firstStoredActivation;
1230  firstStoredActivation = (firstStoredActivation + 1) % MAXSTOREDACTIVATIONS;
1231  return storedActivations[ret];
1232 }
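/*
 * Draining sketch for the circular buffer filled by updateNet()
 * (illustrative):
 *
 *   float* a;
 *   while ((a = net->getOldestStoredActivations()) != NULL) {
 *       // a points to nneurons floats, valid until they are overwritten
 *   }
 */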
1233 
1234 int Evonet::updateCounts()
1235 {
1236  return updatescounter;
1237 }
1238 
1239 void Evonet::printWeights()
1240 {
1241  for (int i = 0; i<freeParameters(); i++) {
1242  printf("%.2f ",freep[i]);
1243  }
1244  printf("\n");
1245 }
1246 
1247 void Evonet::initWeightsInRange(float min, float max)
1248 {
1249  float range = max-min;
1250 
1251  for (int i = 0; i<freeParameters(); i++) {
1252  freep[i] = (((float) rand())/RAND_MAX)*range +min;
1253 // freep[i] = 0.2;
1254  }
1255 
1256 /*
1257  freep[0] = 0.26;
1258  freep[1] = 0.27;
1259  freep[2] = 0.28;
1260  freep[3] = -0.23;
1261  freep[4] = 0.2;
1262  freep[5] = 0.21;
1263  freep[6] = 0.22;
1264  freep[7] = 0.23;
1265  freep[8] = 0.24;
1266  freep[9] = 0.25;
1267  freep[10] = -0.2;
1268  freep[11] = -0.21;
1269  freep[12] = -0.22;
1270 */
1271 }
1272 
1273 void Evonet::initWeightsInRange(float minBias, float maxBias, float minWeight, float maxWeight)
1274 {
1275  const float brange = maxBias - minBias;
1276  const float wrange = maxWeight - minWeight;
1277 
1278  // Pointer to free parameters to randomize them
1279  float* p = freep;
1280 
1281  // Randomizing gains. We use the biases range for them
1282  for(int i = 0; i < nneurons; ++i) {
1283  if (neurongain[i] == 1) {
1284  *(p++) = (((float) rand()) / float(RAND_MAX)) * brange + minBias;
1285  }
1286  }
1287 
1288  // Now randomizing biases
1289  for(int i = 0; i < nneurons; ++i) {
1290  if (neuronbias[i] == 1) {
1291  *(p++) = (((float) rand()) / float(RAND_MAX)) * brange + minBias;
1292  }
1293  }
1294 
1295  // Finally randomizing all the rest (there should be only weights, here)
1296  for (; p != &freep[freeParameters()]; ++p) {
1297  *p = (((float) rand()) / float(RAND_MAX)) * wrange + minWeight;
1298  }
1299 }
1300 
1301 void Evonet::initWeightsNguyenWidrow(float min, float max) { // signature restored (name assumed): Nguyen-Widrow initialization
1302 
1303  initWeightsInRange(min, max);
1304 
1305  double beta = 0.7 * pow(nhiddens, 1.0/ninputs);
1306  double norm = 0;
1307 
1308  double tmp;
1309 
1310  for (int i =0; i<nhiddens; i++) {
1311  for (int j = 0; j<ninputs; j++) {
1312  tmp = getWeight(i+ninputs, j);
1313  norm += tmp*tmp;
1314  }
1315 
1316  if (neuronbias[i+ninputs]) {
1317 
1318  int ptr = 0;
1319  for (int j = 0; j<i+ninputs; j++) {
1320  if(neuronbias[j])
1321  ptr++;
1322  }
1323 
1324  norm += freep[ptr]*freep[ptr];
1325  }
1326 
1327 
1328  }
1329 
1330  norm = sqrt(norm);
1331 
1332  double k = beta/norm;
1333 
1334  for (int i =0; i<nhiddens; i++) {
1335  for (int j = 0; j<ninputs; j++) {
1336  setWeight(i+ninputs, j, getWeight(i+ninputs, j)*k);
1337  }
1338 
1339  if (neuronbias[i+ninputs]) {
1340 
1341  int ptr = 0;
1342  for (int j = 0; j<i+ninputs; j++) {
1343  if(neuronbias[j])
1344  ptr++;
1345  }
1346 
1347  freep[ptr]*=k;
1348  }
1349  }
1350 
1351 
1352  }
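// Nguyen-Widrow note: beta = 0.7 * nhiddens^(1/ninputs) is the classic
// scaling factor of this initialization; renormalizing the input-to-hidden
// weights (and biases) to norm beta spreads the active regions of the
// hidden sigmoids evenly across the input space.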
1353 
1354 
1356 
1357  //for (int i = 0; i<freeParameters(); i++) {
1358  // freep[i] = 0;
1359  //}
1360 
1361  int ptr = 0;
1362 
1363  for (int i = 0; i<nneurons; i++) {
1364  if (neuronbias[i]) {
1365  if (i>16 && i<23) {
1366  freep[ptr++]=0;
1367  }
1368  else {
1369 // freep[ptr++]=;
1370  ptr++;
1371  }
1372  }
1373  }
1374 
1375 
1376  for (int b=0; b<net_nblocks; b++) {
1377  for (int i=0; i<net_block[b][2]*net_block[b][4]; i++) {
1378  if (net_block[b][0] == 0 && net_block[b][5]==1){
1379 // freep[ptr++]=-1;
1380  ptr++;
1381  }
1382  else if (net_block[b][0] == 0 && net_block[b][5]==0){
1383  freep[ptr++]=0;
1384  }
1385  }
1386  }
1387  // This sets up the reflex weights
1388 
1389  int p = 0;
1390  for (int i = 0; i<nneurons; i++)
1391  p += neuronbias[i];
1392 
1393 
1394  freep[p++] = 0;
1395  freep[p++] = 15;
1396  freep[p++] = -15;
1397  freep[p] = 0;
1398 
1399 
1400 
1401  }
1402 
1403 //#define BPDEBUG
1404 
1405 #ifdef BPDEBUG
1406 #define bpdebug(x,...) printf(x,##__VA_ARGS__)
1407 #define debug(x,...) printf(x,##__VA_ARGS__)
1408 #else
1409 #define bpdebug(x,...)
1410 #define debug(x,...)
1411 #endif
1412 
1413 #define inRange(x,y,z) ( (x) >= (y) && (x) < (y)+(z) )
1414 
1415  int Evonet::isHidden(int neuron){
1416  return neuron >= ninputs && neuron < ninputs+nhiddens;
1417  }
1418 
1419 /* float Evonet::backPropStep(QVector<float> tInput, double rate) {
1420 
1421  float globalError = 0;
1422 
1423  float delta[MAXN];
1424  float error[MAXN];
1425 
1426  int b;
1427  int nbiases = 0;
1428 
1429  for (int i = 0; i<nneurons; i++, nbiases += neuronbias[i]);
1430 
1431  bpdebug("%s net: %p, net_blocks: %p\n",__PRETTY_FUNCTION__,this, net_block);
1432 
1433 #ifdef BPDEBUG
1434  printf("netinput: ");
1435 
1436  for (int i = 0; i<ninputs; i++) {
1437  printf("%f ",input[i]);
1438  }
1439  printf("\n");
1440 
1441  printWeights();
1442  printIO();
1443 #endif
1444 
1445  bpdebug("nbiases: %d\n\n", nbiases);
1446 
1447  //
1448  // Storing offsets to easily obtain the staring position
1449  // of the weights for each connection block
1450  //
1451 
1452  bpdebug("Offsets:");
1453 
1454  QVector<int> offsets;
1455  offsets.push_back(nbiases);
1456  bpdebug("\t%d", offsets[0]);
1457 
1458  int p = 0;
1459 
1460  for (b = 0; b<net_nblocks; b++) {
1461  if (net_block[b][0]==0) {
1462  offsets.push_back(offsets[p++] + net_block[b][2]*net_block[b][4]);
1463  bpdebug("\t%d", offsets[p]);
1464  break;
1465  }
1466  }
1467 
1468 
1469  int bias_ptr = offsets[0]-1;
1470  bpdebug("\nbias_ptr: %d\n\n", bias_ptr);
1471 
1472  //
1473  // Compute the error for the output neurons
1474  //
1475  // NOTE : this assumes that the last connection block to train (net_block[b][5]==1) is the
1476  // one of the outputs. This code should be modified to consider the case in which the
1477  // connections to the output neurons are divided in two (or more) blocks.
1478  // It may be useful to have a function/flag to indicate wether a connection block involves
1479  // output neurons or not.
1480  //
1481 
1482 
1483  bpdebug("Error for last block\n");
1484  for (b=net_nblocks-1; b>=0; b--) {
1485  if (! (net_block[b][0]==0 && net_block[b][5]==1) )
1486  continue;
1487 
1488  int end = net_block[b][1]+net_block[b][2];
1489 
1490  for (int i = net_block[b][1]; i<end; i++) {
1491 
1492  float d = tInput[i-net_block[b][1]] - act[i];
1493 
1494  error[i] = act[i]*(1-act[i]) * d;
1495  delta[i] = error[i] * rate;
1496 
1497  bpdebug("\terror[%d]= gradient*(tInput[i - net_block[%d][3]]-act[%d]) = act[%d]*(1-act[%d]) * (tInput[%d - %d = %d]-act[%d]) = %f*(1-%f) * (%f - %f) = %f * %f = %f\n",i,b,i,i,i,i,net_block[b][1],i-net_block[b][1],i,act[i],act[i],tInput[i-net_block[b][1]],act[i],act[i]*(1-act[i]),(tInput[i-net_block[b][1]] - act[i]), error[i]);
1498  bpdebug("\terror[%d]= %f\n",i,error[i]);
1499 
1500  globalError+= d*d;
1501  }
1502 
1503  break;
1504  }
1505 
1506 
1507  //
1508  // Backpropagate the error
1509  //
1510 
1511  bpdebug("\nBackpropagate\n");
1512 
1513  p=offsets.size()-1;
1514  for (; b>=0; b--) {
1515 
1516  if (! (net_block[b][0]==0 && net_block[b][5]==1) )
1517  continue;
1518 
1519  //
1520  // First compute the error for the lower layer. This must be done
1521  // before updating the weights (since the error depends on the weights).
1522  // This step is not necessary if the lower layer is the input one.
1523  //
1524 
1525  //
1526  // i iterates over the "lower" layer, j over the "upper"
1527  //
1528 
1529  bpdebug("\tb: %d\n", b);
1530 
1531  if (net_block[b][3]+net_block[b][4] -1 > ninputs) {
1532 
1533  bpdebug("\tnetblock[%d][3]+net_block[%d][4] -1 = %d > %d => Computing the error\n", b,b,net_block[b][3]+net_block[b][4]-1, ninputs);
1534 
1535  for (int i = net_block[b][3]; i<net_block[b][3]+net_block[b][4]; i++) {
1536 
1537  bpdebug("\ti: %d\n", i);
1538  error[i] = 0;
1539 
1540  for (int j= net_block[b][1]; j<net_block[b][1]+net_block[b][2]; j++) {
1541 
1542  error[i]+= error[j] * freep[ offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][2]];
1543  bpdebug("\t\tj: %d\n", j);
1544  bpdebug("\t\terror[%d] += act[j] * freep[ offset[p] + (i-net_block[b][3]) + j*net_block[b][2] ] = act[%d] * freep[ %d + %d + %d*%d ] = %f * %f = %f\n", i,j,offsets[p],(i-net_block[b][3]),(j-net_block[b][1]),net_block[b][2], act[j], freep[ offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][2]], act[j]*freep[ offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][2]] );
1545  bpdebug("\t\terror[%d]: %f\n", i,error[i]);
1546  }
1547 
1548  error[i]*= act[i]*(1-act[i]);
1549  bpdebug("\t\terror[%d]: %f\n", i,error[i]);
1550  delta[i] = error[i]*rate;
1551  bpdebug("\t\tdelta[%d]: %f\n", i,delta[i]);
1552  }
1553 
1554  }
1555 
1556  //
1557  // Then modify the weights of the connections from the lower to the upper layer
1558  //
1559 
1560  bpdebug("\n\tUpdating weights\n");
1561  for (int j= net_block[b][1]+net_block[b][2]-1; j>=net_block[b][1]; j--) {
1562  bpdebug("\tj: %d\n", j);
1563 
1564  if (neuronbias[j]) {
1565  freep[bias_ptr--] += delta[j];
1566  bpdebug("\t\tNeuron has bias\n");
1567  bpdebug("\t\t\tfreep[bias_ptr] = freep[ %d ] += delta[%d] = %f\n", bias_ptr+1, j, delta[j]);
1568  bpdebug("\t\t\tfreep[%d]: %f\n", bias_ptr+1, freep[bias_ptr+1]);
1569  }
1570 
1571  for (int i = net_block[b][3]; i<net_block[b][3]+net_block[b][4]; i++) {
1572  freep[ offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][4]] += delta[j] * act[i];
1573  bpdebug("\t\ti: %d\n", i);
1574  bpdebug("\t\t\tfreep[ offset[%d] + (i-net_block[%d][3]) + j*net_block[%d][2] ] = freep[ %d + %d + %d*%d = %d ] += delta[%d] * act[%d] = %f * %f = %f\n", p, b,b, offsets[p], i-net_block[b][3], j-net_block[b][1],net_block[b][4],offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][4],j,i,delta[j],act[i],delta[j]*act[i] );
1575  bpdebug("\t\t\tfreep[ %d ] = %f\n", offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][4], freep[offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][4]]);
1576  }
1577 
1578 
1579  }
1580 
1581  p--;
1582 
1583  }
1584 
1585  bpdebug("\n\n");
1586  return globalError;
1587 
1588 
1589  }
1590 */
1591 
1593 
1594  for (int i = 0; i<nneurons; i++) {
1595  printf("act[%d]: %f\n",i,act[i]);
1596  }
1597  }
1598 
1599 
1600 
1601  float Evonet::computeMeanSquaredError(QVector<float> trainingSet, QVector<float> desiredOutput) {
1602 
1603  float err = 0;
1604  int size = trainingSet.size()/ninputs;
1605 // int nOutputToTrain = desiredOutput.size() / size;
1606 
1607  int ptr = 0;
1608  double tmp;
1609 
1610  for (int i = 0; i<nneurons; i++) {
1611  act[i] = 0;
1612  }
1613 
1614 
1615  for (int i = 0; i<size; i++) {
1616 
1617  for (int j = 0; j<ninputs; j++) {
1618  setInput(j, trainingSet[i*ninputs + j]);
1619  }
1620 
1621  updateNet();
1622 
1623  for (int j=0; j<noutputs; j++) {
1624  if (!outputsToTrain[j])
1625  continue;
1626 
1627  tmp = desiredOutput[ptr++] - act[j+ninputs+nhiddens];
1628 // printf("d[%d] - act[%d] = %f - %f = %f\n",ptr-1,j+ninputs+nhiddens,desiredOutput[ptr-1 ], act[j+ninputs+nhiddens], tmp);
1629  err += tmp*tmp*err_weights[j]*err_weights[j];
1630 
1631  if (std::isinf(err)) {
1632  printf("INF!!\n");
1633  }
1634 
1635  }
1636 
1637  }
1638 
1639 // printf("err: %f\n",err);
1640 
1641  return err / (err_weight_sum*size);
1642 
1643  }
1644 
1645  int Evonet::extractWeightsFromNet(Eigen::VectorXf& w) {
1646 
1647  //*
1648  // This code assumes that input neurons have no bias and
1649  // none of the neurons has gain.
1650  //*
1651 
1652  int wPtr = 0, paramPtr = 0;
1653 
1654  int nbiases = 0;
1655  for (int i = 0; i<nneurons; i++) {
1656  nbiases += (neuronbias[i]==1);
1657  }
1658  paramPtr = nbiases;
1659 
1660  //*
1661  // Search for neurons to train
1662  //*
1663 
1664  for (int b = 0; b<net_nblocks; b++) {
1665 
1666  //*
1667  // Output neurons are treated separately. You need however to go through
1668  // all the blocks since it is not guaranteed that the output blocks are all at the end.
1669  //*
1670 
1671  if (net_block[b][0] != 0)
1672  continue;
1673 
1674  if (net_block[b][5]==1 && !(net_block[b][1]>=ninputs+nhiddens)) {
1675  for (int i = net_block[b][1]; i<net_block[b][1]+net_block[b][2]; i++) {
1676 
1677  if (neuronbias[i]) {
1678 
1679  //*
1680  // If the neuron has a bias, in order to obtain the index of the
1681  // corresponding bias in vector freep, we iterate over all "previous"
1682  // neurons, counting those that have a bias.
1683  //*
1684 
1685  int ptr = 0;
1686  for (int j = 0; j<i; j++) {
1687  if(neuronbias[j])
1688  ptr++;
1689  }
1690 
1691  debug("Adding bias of neuron %d (freep[%d]) in w[%d]\n",i,ptr,wPtr);
1692  w[wPtr++] = freep[ptr];
1693  }
1694 
1695  //*
1696  // Adding weights of the connections of the i-th neurons with neurons
1697  // in the "lower" block
1698  //*
1699 
1700  for (int j = 0; j<net_block[b][4]; j++) {
1701  debug("Adding connection %d of neuron %d (freep[%d]) in w[%d]\n",j,i,paramPtr,wPtr);
1702  w[wPtr++] = freep[paramPtr++];
1703  }
1704  }
1705  }
1706  else
1707  paramPtr+= net_block[b][2]*net_block[b][4];
1708  }
1709 
1710 
1711  //*
1712  // Output neurons are stored sequentially, from the first to the last.
1713  //*
1714 
1715 
1716  for (int i = 0; i<noutputs; i++) {
1717  paramPtr = nbiases;
1718  if (!outputsToTrain[i])
1719  continue;
1720 
1721  int i_freep = i+ninputs+nhiddens;
1722 
1723  if (neuronbias[i_freep]) {
1724 
1725  //*
1726  // If the neuron has a bias, in order to obtain the index of the
1727  // corresponding bias in vector freep, we iterate over all "previous"
1728  // neurons, counting those that have a bias.
1729  //*
1730 
1731  int ptr = 0;
1732  for (int j = 0; j<i_freep; j++) {
1733  if(neuronbias[j])
1734  ptr++;
1735  }
1736 
1737  debug("Adding bias of output %d (freep[%d]) in w[%d]\n",i,ptr,wPtr);
1738  w[wPtr++] = freep[ptr];
1739  }
1740 
1741 
1742 
1743  for (int b = 0; b<net_nblocks; b++) {
1744 
1745  debug("Accessing trainingHiddenBlock[net_block[%d][3] = %d][%d]\n",b,net_block[b][3],i);
1746  if(!(trainingHiddenBlock[net_block[b][3]][i] && inRange(net_block[b][1], ninputs+nhiddens, noutputs) )) {
1747 // if (net_block[b][0] == 0) {
1748  paramPtr+= net_block[b][2]*net_block[b][4];
1749 // }
1750  debug("\tparamPtr: %d\n", paramPtr);
1751  continue;
1752  }
1753 
1754  //*
1755  // Iterate over hidden neurons in the current block
1756  //*
1757 
1758  for (int j = 0; j<net_block[b][4]; j++) {
1759  debug("Adding connection %d of output %d (freep[%d]) in w[%d]\n",j,i_freep,(i_freep-net_block[b][1])*net_block[b][4] + paramPtr,wPtr);
1760  w[wPtr++] = freep[(i_freep-net_block[b][1])*net_block[b][4] + paramPtr++];
1761  }
1762  }
1763 
1764  }
1765 
1766  for (int i = 0; i<w.size(); i++) {
1767  debug("%f\n",w[i]);
1768  }
1769 
1770 
1771  return wPtr;
1772  }
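// Usage sketch from inside the class (illustrative, assuming nconnections,
// as set up by prepareForTraining(), counts every trainable parameter):
//   Eigen::VectorXf w(nconnections);
//   extractWeightsFromNet(w);
//   // ... adjust w ...
//   importWeightsFromVector(w);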
1773 
1774  int Evonet::importWeightsFromVector(Eigen::VectorXf& w) {
1775 
1776  //*
1777  // Inverse procedure of the extraction of the weights (see extractWeightsFromVector() ).
1778  //*
1779 
1780  int wPtr = 0, paramPtr = 0;
1781 
1782  int nbiases = 0;
1783  for (int i = 0; i<nneurons; i++) {
1784  nbiases += (neuronbias[i]==1);
1785  }
1786  paramPtr = nbiases;
1787 
1788  //*
1789  // Search for neurons to train
1790  //*
1791 
1792  for (int b = 0; b<net_nblocks; b++) {
1793 
1794  //*
1795  // Output neurons are treated separately. You need however to go through
1796  // all the blocks since it is not guaranteed that the output blocks are all at the end.
1797  //*
1798 
1799  if (net_block[b][0] != 0)
1800  continue;
1801 
1802  if (net_block[b][5]==1 && !(net_block[b][1]>=ninputs+nhiddens)) {
1803  for (int i = net_block[b][1]; i<net_block[b][1]+net_block[b][2]; i++) {
1804 
1805  if (neuronbias[i]) {
1806 
1807  //*
1808  // If the neuron has a bias, in order to obtain the index of the
1809  // corresponding bias in vector freep, we iterate over all "previous"
1810  // neurons, counting those that have a bias.
1811  //*
1812 
1813  int ptr = 0;
1814  for (int j = 0; j<i; j++) {
1815  if(neuronbias[j])
1816  ptr++;
1817  }
1818 
1819  debug("Adding bias of neuron %d (w[%d]) in freep[%d]\n",i,wPtr,ptr);
1820  freep[ptr] = w[wPtr++];
1821  }
1822 
1823  //*
1824  // Adding weights of the connections of the i-th neurons with neurons
1825  // in the "lower" block
1826  //*
1827 
1828  for (int j = 0; j<net_block[b][4]; j++) {
1829  debug("Adding connection %d of neuron %d (w[%d]) in freep[%d]\n",j,i,wPtr,paramPtr);
1830  freep[paramPtr++] = w[wPtr++];
1831  }
1832  }
1833  }
1834  else
1835  paramPtr+= net_block[b][2]*net_block[b][4];
1836  }
1837 
1838 
1839  //*
1840  // Output neurons are stored sequentially, from the first to the last.
1841  //*
1842 
1843 
1844  for (int i = 0; i<noutputs; i++) {
1845  paramPtr = nbiases;
1846  if (!outputsToTrain[i])
1847  continue;
1848 
1849  int i_freep = i+ninputs+nhiddens;
1850 
1851  if (neuronbias[i_freep]) {
1852 
1853  //*
1854  // If the neuron has a bias, we obtain the index of the corresponding
1855  // bias in vector freep by iterating over all "previous" neurons and
1856  // counting those that have a bias.
1857  //*
1858 
1859  int ptr = 0;
1860  for (int j = 0; j<i_freep; j++) {
1861  if(neuronbias[j])
1862  ptr++;
1863  }
1864  debug("Adding bias of output %d (w[%d]) in freep[%d]\n",i,wPtr,ptr);
1865  freep[ptr] = w[wPtr++];
1866  }
1867 
1868 
1869 
1870  for (int b = 0; b<net_nblocks; b++) {
1871 
1872  if(!(trainingHiddenBlock[net_block[b][3]][i] && inRange(net_block[b][1], ninputs+nhiddens, noutputs) )) {
1873 // if(! trainingHiddenBlock[net_block[b][3]][i]) {
1874  paramPtr+= net_block[b][2]*net_block[b][4];
1875  continue;
1876  }
1877 
1878  //*
1879  // Iterate over hidden neurons in the current block
1880  //*
1881 
1882  for (int j = 0; j<net_block[b][4]; j++) {
1883  debug("Adding connection %d of output %d (w[%d]) in freep[%d]\n",j,i_freep,wPtr,(i_freep-net_block[b][1])*net_block[b][4] + paramPtr);
1884  freep[(i_freep-net_block[b][1])*net_block[b][4] + paramPtr++] = w[wPtr++];
1885  }
1886  }
1887 
1888  }
1889 
1890  return wPtr;
1891  }
1892 
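 //*
 // getWeight() and setWeight() assume the freep layout used throughout this
 // file: all the biases come first (one per neuron with neuronbias[i]==1),
 // followed by the connection blocks in declaration order, each occupying
 // net_block[b][2]*net_block[b][4] consecutive entries.
 //*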
1893  float Evonet::getWeight(int to, int from) {
1894 
1895  debug("Getting w to %d from %d\n", to,from);
1896  int ptr = 0;
1897  for (int i = 0; i<nneurons; i++) {
1898  ptr += neuronbias[i]==1;
1899  }
1900 
1901  for (int b = 0; b<net_nblocks; b++) {
1902  if (inRange(to, net_block[b][1], net_block[b][2]) && inRange(from, net_block[b][3], net_block[b][4])) {
1903  ptr+= (to-net_block[b][1])*net_block[b][4]+(from-net_block[b][3]);
      break; // stop here: blocks after the matching one must not be counted
1904  }
1905  else {
1906  ptr+= net_block[b][2]*net_block[b][4];
1907  }
1908  }
1909 
1910  debug("Returning freep[%d]\n", ptr);
1911  if (ptr >= freeParameters()) {
1912  return 0;
1913  }
1914 
1915  return freep[ptr];
1916  }
1917 
1918  void Evonet::setWeight(int to, int from, float w) {
1919 
1920  int ptr = 0;
1921  for (int i = 0; i<nneurons; i++) {
1922  ptr += neuronbias[i]==1;
1923  }
1924 
1925  for (int b = 0; b<net_nblocks; b++) {
1926  if (inRange(to, net_block[b][1], net_block[b][2]) && inRange(from, net_block[b][3], net_block[b][4])) {
1927  ptr+= (to-net_block[b][1])*net_block[b][4]+(from-net_block[b][3]);
      break; // stop here: blocks after the matching one must not be counted
1928  }
1929  else {
1930  ptr+= net_block[b][2]*net_block[b][4];
1931  }
1932  }
1933 
1934  freep[ptr] = w;
1935 
1936 
1937  }
1938 
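 //*
 // Derivative of the logistic activation expressed in terms of the activation
 // itself: for f(net) = 1/(1+exp(-net)) we have f'(net) = f(net)*(1-f(net)),
 // so given x = f(net) the derivative is simply x*(1-x).
 //*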
1939  float Evonet::derivative(int /*n*/, float x) {
1940 
1941  return x*(1-x);
1942 
1943  }
1944 
1945  void Evonet::prepareForTraining(QVector<float> &err_w) {
1946 
1947  nconnections = 0;
1948 
1949  //*
1950  // Initializing some variables in order to make the computation easier:
1951  //
1952  // nconnections: number of weights and biases subject to training
1953  // outputsToTrain: outputsToTrain[i]==1 iff the i-th output is subject to training
1954  // n_outputsToTrain: number of output neurons to train
      // trainingHiddenBlock: trainingHiddenBlock[n][i]==1 iff neuron n sends a
      // trained connection to the i-th output
1955  //
1956  //*
1957 
1958 
1959  outputsToTrain = (char*)calloc(noutputs, sizeof(char));
1960  n_outputsToTrain = 0;
1961 
1962  trainingHiddenBlock = (char**)calloc(nneurons, sizeof(char*));
1963  for (int i = 0; i<nneurons; i++) {
1964  trainingHiddenBlock[i] = (char*) calloc(noutputs, sizeof(char));
1965  }
1966 
1967  for (int b=0; b<net_nblocks; b++)
1968 
1969  if (net_block[b][0]==0 && net_block[b][5]==1) {
1970  //*
1971  // If the current block is a connection block and the block is subject to training
1972  // count the connections of the current block
1973  //*
1974 
1975  nconnections += (net_block[b][2]*net_block[b][4]);
1976 
1977  //*
1978  // Count the biases of the neurons in the current block
1979  //*
1980 
1981  for (int i = net_block[b][1]; i<net_block[b][1]+net_block[b][2]; i++) {
1982  nconnections += (neuronbias[i] == 1);
1983  }
1984 
1985  if (net_block[b][1] >= ninputs+nhiddens) {
1986  memset(outputsToTrain+net_block[b][1]-ninputs-nhiddens, 1, net_block[b][2]*sizeof(char));
1987  n_outputsToTrain += net_block[b][2];
1988 
1989  for(int j=0;j<net_block[b][4];j++)
1990  memset(&trainingHiddenBlock[net_block[b][3]+j][net_block[b][1]-ninputs-nhiddens], 1, net_block[b][2]*sizeof(char));
1991  }
1992 
1993  }
1994 
1995 #ifdef BPDEBUG
1996  printf("n_outputsToTrain: %d\n",n_outputsToTrain);
1997  printf("output to train: ");
1998 
1999  for (int i = 0; i<noutputs; i++) {
2000  printf("%d ",outputsToTrain[i]);
2001  }
2002  printf("\n");
2003 
2004  for (int j = 0; j<nneurons; j++) {
2005  for (int i = 0; i<noutputs; i++) {
2006  printf("%d ",trainingHiddenBlock[j][i]);
2007  }
2008  printf("\n");
2009  }
2010 #endif
2011  debug("nconnections: %d\n", nconnections);
2012 
2013  err_weight_sum = 0;
2014  for (int i = 0; i<err_w.size(); i++) {
2015  err_weights.push_back(err_w[i]*err_w[i]);
2016  err_weight_sum+=err_w[i];
2017  }
2018 
2019  printf("err_weight_sum : %f\n",err_weight_sum);
2020 
2021  }
2022 
2023  void Evonet::endTraining() {
2024  free(outputsToTrain);
2025  for(int i=0;i<nneurons;i++)
2026  free(trainingHiddenBlock[i]);
2027  free(trainingHiddenBlock);
2028 
2029  }
2030 
2031  bool Evonet::showTeachingInput()
2032  {
2033  return showTInput;
2034  }
2035 
2036  void Evonet::activateMonitorUpdate() {
2037  updateMonitor = true;
2038  }
2039 
2040  void Evonet::deactivateMonitorUpdate() {
2041  updateMonitor = false;
2042  }
2043 
2044  /* BACKPROPAGATION ALGORITHMS */
2045  float Evonet::getTeachingInputEntry(int id)
2046  {
2047  return teachingInput[id];
2048  }
2049 
2050  float Evonet::getBackPropError()
2051  {
2052  return backproperror;
2053  }
2054 
2055  float Evonet::backPropStep(QVector<float> tInput, double rate)
2056  {
2057  int i, t, b;
2058  float diff, temp;
2059  float *p;
2060 
2061  teachingInput = tInput;
2062 
2063  Eigen::MatrixXf weightMatrix = Eigen::MatrixXf::Zero(MAXN,MAXN); // a matrix representation of the weights; zero-initialized so that pairs not covered by any connection block read as 0 below
2064  float delta[MAXN];
2065  float global_error = 0.0;
2066 
2067  // Compute the weightMatrix
2068  p = freep;
2069 
2070  // Skip the gain
2071  for(i = 0; i < nneurons; i++) {
2072  if (neurongain[i] == 1) {
2073  p++;
2074  }
2075  }
2076  // Skip the biases
2077  for(i = 0; i < nneurons; i++) {
2078  if (neuronbias[i] == 1) {
2079  p++;
2080  }
2081  }
2082  // blocks
2083  for (b=0; b < net_nblocks; b++) {
2084  if (net_block[b][0] == 0) {
2085  for(t = net_block[b][1]; t < net_block[b][1] + net_block[b][2]; t++) {
2086  for(i = net_block[b][3]; i < net_block[b][3] + net_block[b][4]; i++) {
2087  weightMatrix(i,t) = *p;
2088  p++;
2089  }
2090  }
2091  }
2092  if (net_block[b][0] == 1) {
2093  for(t = net_block[b][1]; t < (net_block[b][1] + net_block[b][2]); t++) {
2094  // To manage leaky neurons
2095  if (neurontype[t] == 1) {
2096  p++;
2097  }
2098  }
2099  }
2100  }
2101 
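      // The two loops below compute the standard backpropagation deltas for
      // logistic units:
      //   output neurons: delta_o = (t_o - act_o) * act_o * (1 - act_o)
      //   hidden neurons: delta_h = act_h * (1 - act_h) * sum_o delta_o * w(h,o)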
2102  // First compute the deltas for the output neurons
2103  for (i = ninputs + nhiddens; i < nneurons; i++) {
2104  // If the teaching input has the invalid value -99999.0, that teaching
2105  // input is treated as "don't care" and the error is forced to 0
2106  if (tInput[i - (ninputs + nhiddens)] == -99999.0)
2107  {
2108  diff = 0.0;
2109  }
2110  else
2111  {
2112  diff = (tInput[i - (ninputs + nhiddens)] - act[i]);
2113  }
2114 
2115  delta[i] = diff * act[i] * ((float) 1.0 - act[i]);
2116 
2117  global_error += diff * diff;
2118  }
2119 
2120  // Then compute the deltas for the hidden neurons
2121  for (i = ninputs; i < ninputs + nhiddens; i++) {
2122  temp = (float) 0.0;
2123  for (t = ninputs + nhiddens; t < nneurons; t++) {
2124  temp += delta[t] * (weightMatrix(i,t));
2125  }
2126  delta[i] = ((float) 1.0 - act[i]) * act[i] * temp;
2127  }
2128 
2129  // Lastly, modify the weights
2130  p = freep;
2131 
2132  // Skip the gain
2133  for(i = 0; i < nneurons; i++) {
2134  if (neurongain[i] == 1) {
2135  p++;
2136  }
2137  }
2138 
2139  // Modify the biases
2140  for(i = 0; i < nneurons; i++) {
2141  if (neuronbias[i] == 1) {
2142  float dp_rate = delta[i] * rate;
2143  // modify the weight
2144  *p += (float) 1.0 * dp_rate;
2145  p++;
2146  }
2147  }
2148 
2149  // Update weight blocks
2150  for (b=0; b < net_nblocks; b++) {
2151  if (net_block[b][0] == 0) {
2152  for (t = net_block[b][1]; t < net_block[b][1] + net_block[b][2]; t++) {
2153  for (i = net_block[b][3]; i < net_block[b][3] + net_block[b][4]; i++) {
2154  if (t >= ninputs) {
2155  float dp_rate = delta[t] * rate;
2156  // modify the weight
2157  *p += act[i] * dp_rate;
2158  }
2159  p++;
2160  }
2161  }
2162  }
2163  if (net_block[b][0] == 1) {
2164  for(t = net_block[b][1]; t < (net_block[b][1] + net_block[b][2]); t++) {
2165  // To manage leaky neurons
2166  if (neurontype[t] == 1) {
2167  p++;
2168  }
2169  }
2170  }
2171  }
2172 
2173  backproperror = global_error;
2174 
2175  // If a downloader is associated with the neuronsMonitorUploader, upload the teaching inputs and the backpropagation error
2176  if (showTInput && neuronsMonitorUploader.downloaderPresent() && updateMonitor) {
2177  // This call can block if GUI is too slow
2178  DatumToUpload<ActivationsToGui> d(neuronsMonitorUploader);
2179 
2180  d->activations = false;
2181 
2182  // Reserving the correct number of elements, for efficiency reasons
2183  d->data.reserve(nneurons);
2184  d->data = tInput;
2185  d->data.append(backproperror);
2186 
2187  // Adding the current step
2188  d->updatesCounter = updatescounter;
2189  }
2190 
2191  return global_error;
2192  }
2193 
2194  float Evonet::backPropStep2(QVector<float> tInput, double rate)
2195  {
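      // Note: unlike backPropStep(), this variant leaves freep untouched. The
      // weight and bias deltas are accumulated into backpropfreep and are
      // applied to freep later by updateWeightsAfterBackProp().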
2196  int i, t, b;
2197  float diff, temp;
2198  float *p;
2199  float *bp;
2200 
2201  teachingInput = tInput;
2202 
2203  Eigen::MatrixXf weightMatrix = Eigen::MatrixXf::Zero(MAXN,MAXN); // a matrix representation of the weights; zero-initialized so that pairs not covered by any connection block read as 0 below
2204  float delta[MAXN];
2205  float global_error = 0.0;
2206 
2207  // Compute the weightMatrix
2208  p = freep;
2209 
2210  // Skip the gain
2211  for(i = 0; i < nneurons; i++) {
2212  if (neurongain[i] == 1) {
2213  p++;
2214  }
2215  }
2216  // Skip the biases
2217  for(i = 0; i < nneurons; i++) {
2218  if (neuronbias[i] == 1) {
2219  p++;
2220  }
2221  }
2222  // blocks
2223  for (b=0; b < net_nblocks; b++) {
2224  if (net_block[b][0] == 0) {
2225  for(t = net_block[b][1]; t < net_block[b][1] + net_block[b][2]; t++) {
2226  for(i = net_block[b][3]; i < net_block[b][3] + net_block[b][4]; i++) {
2227  weightMatrix(i,t) = *p;
2228  p++;
2229  }
2230  }
2231  }
2232  if (net_block[b][0] == 1) {
2233  for(t = net_block[b][1]; t < (net_block[b][1] + net_block[b][2]); t++) {
2234  // To manage leaky neurons
2235  if (neurontype[t] == 1) {
2236  p++;
2237  }
2238  }
2239  }
2240  }
2241 
2242  // First compute the deltas for the output neurons
2243  for (i = ninputs + nhiddens; i < nneurons; i++) {
2244  // If the teaching input has the invalid value -99999.0, that teaching
2245  // input is treated as "don't care" and the error is forced to 0
2246  if (tInput[i - (ninputs + nhiddens)] == -99999.0)
2247  {
2248  diff = 0.0;
2249  }
2250  else
2251  {
2252  diff = (tInput[i - (ninputs + nhiddens)] - act[i]);
2253  }
2254 
2255  delta[i] = diff * act[i] * ((float) 1.0 - act[i]);
2256 
2257  global_error += diff * diff;
2258  }
2259 
2260  // Then compute the deltas for the hidden neurons
2261  for (i = ninputs; i < ninputs + nhiddens; i++) {
2262  temp = (float) 0.0;
2263  for (t = ninputs + nhiddens; t < nneurons; t++) {
2264  temp += delta[t] * (weightMatrix(i,t));
2265  }
2266  delta[i] = ((float) 1.0 - act[i]) * act[i] * temp;
2267  }
2268 
2269  // Lastly, modify the weights
2270  bp = backpropfreep;
2271 
2272  // Skip the gain
2273  for(i = 0; i < nneurons; i++) {
2274  if (neurongain[i] == 1) {
2275  bp++;
2276  }
2277  }
2278 
2279  // Modify the biases
2280  for(i = 0; i < nneurons; i++) {
2281  if (neuronbias[i] == 1) {
2282  float dp_rate = delta[i] * rate;
2283  // modify the weight
2284  *bp += (float) 1.0 * dp_rate;
2285  bp++;
2286  }
2287  }
2288 
2289  // Update weight blocks
2290  for (b=0; b < net_nblocks; b++) {
2291  if (net_block[b][0] == 0) {
2292  for (t = net_block[b][1]; t < net_block[b][1] + net_block[b][2]; t++) {
2293  for (i = net_block[b][3]; i < net_block[b][3] + net_block[b][4]; i++) {
2294  if (t >= ninputs) {
2295  float dp_rate = delta[t] * rate;
2296  // modify the weight
2297  *bp += act[i] * dp_rate;
2298  }
2299  bp++;
2300  }
2301  }
2302  }
2303  if (net_block[b][0] == 1) {
2304  for(t = net_block[b][1]; t < (net_block[b][1] + net_block[b][2]); t++) {
2305  // To manage leaky neurons
2306  if (neurontype[t] == 1) {
2307  bp++;
2308  }
2309  }
2310  }
2311  }
2312 
2313  backproperror = global_error;
2314 
2315  // If a downloader is associated with the neuronsMonitorUploader, upload the teaching inputs and the backpropagation error
2316  if (showTInput && neuronsMonitorUploader.downloaderPresent() && updateMonitor) {
2317  // This call can block if GUI is too slow
2318  DatumToUpload<ActivationsToGui> d(neuronsMonitorUploader);
2319 
2320  d->activations = false;
2321 
2322  // Reserving the correct number of elements, for efficiency reasons
2323  d->data.reserve(nneurons);
2324  d->data = tInput;
2325  d->data.append(backproperror);
2326 
2327  // Adding the current step
2328  d->updatesCounter = updatescounter;
2329  }
2330 
2331  return global_error;
2332  }
2333 
2334  void Evonet::calculateBackPropagationError(QVector<float> tInput)
2335  {
2336  int i;
2337  float diff;
2338  float global_error = 0.0;
2339 
2340  // Calculate the backpropagation error
2341  for (i = ninputs + nhiddens; i < nneurons; i++) {
2342  diff = (tInput[i - (ninputs + nhiddens)] - act[i]);
2343 
2344  global_error += diff * diff;
2345  }
2346 
2347  // Save the backpropagation error
2348  backproperror = global_error;
2349  }
2350 
2351  void Evonet::initBackPropFreep()
2352  {
2353  float* p;
2354  int i;
2355  p = backpropfreep;
2356 
2357  for (i = 0; i < nparameters; i++)
2358  {
2359  *p = 0.0f;
2360  p++;
2361  }
2362  }
2363 
2364  void Evonet::updateWeightsAfterBackProp()
2365  {
2366  float* p;
2367  float* origp;
2368  float dp;
2369  int i;
2370 
2371  p = freep;
2372  origp = backpropfreep;
2373 
2374  for (i = 0; i < nparameters; i++)
2375  {
2376  // Update freep
2377  dp = *origp;
2378  if (dp != 0)
2379  {
2380  //printf("Index: %d, delta: %.2f\n", i, dp);
2381  }
2382  *p += dp;
2383  // Reset backpropfreep vector
2384  *origp = 0.0f;
2385  origp++;
2386  p++;
2387  }
2388  }
2389 
2390  float* Evonet::getBackPropWeightModification()
2391  {
2392  return backpropfreep;
2393  }
2394 
2395  void Evonet::saveCopyBackPropFreep()
2396  {
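      // Accumulates the deltas currently stored in backpropfreep into
      // copybackpropfreep (which initCopyBackPropFreep() resets to zero).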
2397  float* p;
2398  float* bp;
2399  int i;
2400 
2401  p = copybackpropfreep;
2402  bp = backpropfreep;
2403 
2404  for (i = 0; i < nparameters; i++)
2405  {
2406  *p += *bp;
2407  p++;
2408  bp++;
2409  }
2410  }
2411 
2412  void Evonet::initCopyBackPropFreep()
2413  {
2414  float* p;
2415  int i;
2416  p = copybackpropfreep;
2417 
2418  for (i = 0; i < nparameters; i++)
2419  {
2420  *p = 0.0f;
2421  p++;
2422  }
2423  }
2424 
2425  float* Evonet::getCopyBackPropFreep()
2426  {
2427  return copybackpropfreep;
2428  }
2429  /* END BACKPROPAGATION ALGORITHMS SECTION */
2430 
2431  // FUNCTIONS TO GET ACTIVATIONS AND NETINPUTS
2432  float* Evonet::getActivations()
2433  {
2434  return act;
2435  }
2436 
2437  float* Evonet::getNetInputs()
2438  {
2439  return netinput;
2440  }
2441 
2442  float Evonet::trainLevembergMarquardt(QVector<float> trainingSet, QVector<float> desiredOutput, float maxError) {
2443 
2444  training = true;
2445 
2446  int pattern, b;
2447 // int lastBlock;
2448  int cycles,i, j;
2449 
2450  double lambda=0.001;
2451  double currentError = 0, previousError= 0;
2452  double delta;
2453 
2454  if (nconnections == 0) {
2455  training = false;
2456  printf("nconnections: 0\nnothing to train\n");
2457  return 0;
2458  }
2459 
2460  int nbiases = 0;
2461  for (i = 0; i<nneurons; i++) {
2462  nbiases += neuronbias[i]==1;
2463  }
2464 
2465  int size = trainingSet.size() / ninputs;
2466  debug("npatterns: %d\n", size);
2467 
2468  Eigen::VectorXf err(size*n_outputsToTrain);
2469  Eigen::MatrixXf jacobian(size*n_outputsToTrain, nconnections );
2470 // Eigen::MatrixXf inv_error_weight(size*n_outputsToTrain,size*n_outputsToTrain);
2471 // Eigen::MatrixXf jt_we(size*n_outputsToTrain, nconnections );
2472  Eigen::MatrixXf jj(nconnections,nconnections);
2473 
2474  Eigen::VectorXf new_weights(nconnections);
2475  Eigen::VectorXf old_weights(nconnections);
2476  Eigen::VectorXf ww_err(nconnections);
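      // err has one entry and jacobian one row per (pattern, trained output)
      // pair; jacobian has one column per trainable parameter (nconnections).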
2477 
2478 
2479  previousError = computeMeanSquaredError(trainingSet, desiredOutput);
2480  printf("Initial error: %f\n",previousError);
2481 
2482 /* if (previousError < maxError) {
2483  printf("Error already below threshold - Nothing to do.\n");
2484  return previousError;
2485  }
2486  */
2487 
2488 // int end = (maxIterations > 0 ? maxIterations : INFINITY);
2489  int end = maxIterations;
2490  for (cycles = 0; cycles<end; cycles++) {
2491 
2492  jacobian.setZero();
2493 
2494  extractWeightsFromNet(old_weights);
2495  debug("weights extracted\n");
2496 
2497  //*
2498  // Iterating over patterns. This can be easily parallelized using OpenMP
2499  //*
2500 
2501  for (pattern=0; pattern<size; pattern++) {
2502 
2503  debug("\n\n------------\n\n");
2504  debug("\tpattern: %d\n", pattern);
2505 
2506  //*
2507  // Forward computation
2508  //
2509  // Beware in case of parallel computation: this is a critical section
2510  //*
2511 
2512  for (i = 0; i<ninputs; i++) {
2513  setInput(i, trainingSet[pattern*ninputs + i]);
2514  }
2515 // debug("Before update (%s):\n",__PRETTY_FUNCTION__);
2516 // printAct();
2517  updateNet();
2518 
2519 // debug("after update:\n");
2520 // printAct();
2521 
2522 
2523  //*
2524  // Backward computation
2525  //*
2526 
2527  for(int m = noutputs-1; m>=0; m--) {
2528  if (!outputsToTrain[m])
2529  continue;
2530 
2531  int m_freep = m+ninputs+nhiddens;
2532 
2533  int col_idx = nconnections - 1;
2534  int row_idx = n_outputsToTrain*pattern-1;
2535 
2536  for (i = 0; i<=m; i++) {
2537  row_idx+= outputsToTrain[i];
2538  }
2539 
2540  //*
2541  // Computing error and jacobian relative to the output layer
2542  //*
2543 
2544  err[row_idx] = (desiredOutput[row_idx] - act[m_freep])*err_weights[m];
2545  delta = -derivative(m_freep, act[m_freep])*err_weights[m];
2546 
2547  //*
2548  // Iterating over output neurons
2549  //*
2550 
2551  for(i = noutputs-1; i>=0; i--) {
2552 
2553  if (!outputsToTrain[i])
2554  continue;
2555  //*
2556  // Iterating over net blocks in order to get the neurons connected to the i-th output neuron
2557  //*
2558 
2559  for (b=net_nblocks-1; b>=0; b--) {
2560 
2561  //*
2562  // Check if current block is connected to i-th output
2563  //*
2564 // if (net_block[b][5]!=1 && inRange(i+ninputs+nhiddens,net_block[b][3],net_block[b][4]) ) {
2565  if (trainingHiddenBlock[net_block[b][3]][m] && net_block[b][5]==1) {
2566 
2567  for (j=net_block[b][3]+net_block[b][4] -1; j>=net_block[b][3]; j--) {
2568  if (i==m) {
2569  jacobian(row_idx, col_idx--) = delta *act[j];
2570  debug("\t\tcol_idx: %d\n", col_idx+1);
2571  debug("\t\tjacobian(%d,%d) = %f * %f = %f\n", row_idx,col_idx+1,delta,act[j],delta*act[j]);
2572  }
2573  else {
2574  jacobian(row_idx, col_idx--) = 0;
2575  debug("\t\tcol_idx: %d\n", col_idx+1);
2576  debug("\t\tjacobian(%d,%d) = 0\n", row_idx,col_idx+1);
2577  }
2578 
2579 
2580  } //end loop over j
2581 
2582  } // end if
2583 
2584  } //end loop over b
2585 
2586  //*
2587  // Consider the derivative of the error with respect to the bias of the output neuron
2588  //*
2589  if (neuronbias[i+ninputs+nhiddens]) {
2590  debug("\t\tjacobian(%d,%d) = %f\n", row_idx,col_idx,delta);
2591  jacobian(row_idx, col_idx--) = (i==m ? delta : 0);
2592  }
2593 
2594  } // end loop over i
2595 
2596  //*
2597  // Backpropagating the error over hidden neurons.
2598  // This follows the order of the blocks, from the last to the first.
2599  //
2600  // NOTE: this code assumes the net is a 3 layer net (input-hidden-output).
2601  // It will not work in case of nets with more than 1 hidden layer.
2602  //*
2603  debug("\nBackward computation: hidden layer\n");
2604 
2605 
2606  for (b=net_nblocks-1; b>=0; b--) {
2607 
2608  //*
2609  // If it's not a hidden block subject to training connected to the current (m-th) output
2610  //*
2611 
2612 // if ( !( net_block[b][0]==0 && net_block[b][5]==1 && isHidden(net_block[b][1]) && inRange(m_freep,net_block[b][3],net_block[b][4]) ) )
2613  debug("\ttrainingHiddenBlock[%d][%d]: %d\n", net_block[b][1],m,trainingHiddenBlock[net_block[b][1]][m]);
2614  if (net_block[b][0]!=0 || net_block[b][5] !=1 || ! trainingHiddenBlock[net_block[b][1]][m] )
2615  continue;
2616 
2617  //*
2618  // Iterate over hidden neurons in the current block. The computation follows the chain rule, as in standard backpropagation.
2619  //*
2620 
2621  for(j = net_block[b][1]+net_block[b][2]-1; j>= net_block[b][1]; j--) {
2622 
2623  double delta_h = delta* getWeight(m_freep, j) * derivative(j, act[j]);
2624 
2625  for (int k = net_block[b][3] + net_block[b][4]-1; k>= net_block[b][3]; k--) {
2626  jacobian(row_idx, col_idx--) = delta_h * act[k];
2627  debug("\t\tjacobian(%d,%d) = %f * %f = %f\n", row_idx,col_idx+1,delta_h,act[k],delta_h*act[k]);
2628  }
2629 
2630  if (neuronbias[j]) {
2631  debug("\t\tjacobian(%d,%d) = %f\n", row_idx,col_idx,delta_h);
2632  jacobian(row_idx, col_idx--) = delta_h;
2633  }
2634 
2635 
2636  }
2637 
2638 
2639 
2640  } //end loop over b
2641 
2642  } // end loop over m (output neurons)
2643 
2644  } //end pattern
2645 
2646 
2647 
2648  debug("\tAll rows analyzed\n");
2649 
2650 
2651 #ifdef BPDEBUG
2652 // std::cout<<"jacobian:\n"<<jacobian;
2653 // std::cout<<"\n\ndet(j^Tj) :\n"<<(jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).determinant() << "\n";
2654 
2655 // std::cout<<"\n\nerror: " << err.transpose() << "\n";
2656 #endif
2657 
2658  //*
2659  // new_weights = old_weights - (J^T J + lambda I)^-1 J^T e ;
2660  //*
2661 
2662  if (lambda > 100000000 || lambda < 0.000001) {
2663  lambda = 1;
2664  }
2665 
2666 
2667 // jt_we = jacobian.transpose();//*inv_error_weight;
2668  ww_err = jacobian.transpose()*err;
2669  jj = jacobian.transpose()*jacobian;
2670 // printf("det: %lf\n",jj.determinant());
2671 
2672 /* if (std::isnan((jacobian.transpose()*jacobian).determinant()) ) {
2673 
2674  printf("nan determinant : %f\n", (jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).determinant());
2675 
2676 
2677 
2678  FILE *fp = fopen("/Users/Manlio/Desktop/matrix.txt", "w");
2679 
2680  Eigen::MatrixXf pj(nconnections,nconnections);
2681 
2682  pj = jacobian.transpose()*jacobian;
2683 
2684 
2685  for(int i = 0;i<nconnections; i++)
2686  for(int j=0;j<nconnections; j++)
2687  if (!(std::isnormal(pj(i,j)) || pj(i,j)==0 ) ) {
2688  printf("(%d,%d) : %f\n",i,j,pj(i,j));
2689  }
2690 
2691 
2692 
2693  for (int a =0; a<nconnections; a++) {
2694  for (int b = 0; b<nconnections; b++) {
2695  fprintf(fp, "%f ", pj(a,b));
2696  }
2697  fprintf(fp, "\n");
2698  }
2699 
2700  fclose(fp);
2701  exit(0);
2702 
2703  }
2704 */
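 
  //*
  // Levenberg-Marquardt damping schedule: try the update with the current
  // lambda; if the error does not decrease, restore the old weights and
  // retry with lambda*10 (up to 6 times); otherwise keep the new weights,
  // divide lambda by 10 and move on to the next iteration.
  //*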
2705  for (int retry = 0; retry<6; retry++, lambda*=10) {
2706 
2707  debug("\tlambda: %f\n", lambda);
2708 
2709 // new_weights = old_weights - (jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).inverse()*jacobian.transpose()*err;
2710  new_weights = old_weights - (jj + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).ldlt().solve(ww_err);
2711 
2712  // printf("\tdet(j^Tj) : %f -- norm(j^T e) : %f\n",(jacobian.transpose()*jacobian).determinant(), (jacobian.transpose()*err).norm() );
2713 // printf("norm wb2: %f\n",new_weights.norm());
2714 // exit(0);
2715 
2716 // std::cout<<"\n\nnew_weights: " << new_weights.transpose() << "\n";
2717 
2718 
2719  importWeightsFromVector(new_weights);
2720 
2721  currentError = computeMeanSquaredError(trainingSet, desiredOutput);
2722 
2723  printf("iteration: %d err: %f lambda: %f\n",cycles,currentError,lambda);
2724 
2725  debug("currentError: %f\n",currentError);
2726 
2727  if (currentError <= maxError) {
      training = false;
2728  return currentError;
      }
2729  if ((new_weights-old_weights).norm() < 0.0001) {
2730  printf("Minimum gradient reached\n");
      training = false;
2731  return currentError;
2732  }
2733 
2734  if (currentError > previousError) {
2735  importWeightsFromVector(old_weights);
2736  }
2737  else {
2738  previousError = currentError;
2739  lambda/=10;
2740  break;
2741  }
2742 
2743  }
2744 // exit(1);
2745  }
2746 
2747  training = false;
2748  return currentError;
2749 
2750  }
2751 
2752  float Evonet::trainLevembergMarquardtThroughTime(QVector<float> trainingSet, QVector<float> desiredOutput, int time, float maxError) {
2753 
2754  training = true;
2755 
2756  int pattern, b;
2757  // int lastBlock;
2758  int cycles,i, j;
2759 
2760  double lambda=0.001;
2761  double currentError = 0, previousError= 0;
2762  double delta;
2763 
2764  if (nconnections == 0) {
2765  training = false;
2766  printf("nconnections: 0\nnothing to train\n");
2767  return 0;
2768  }
2769 
2770  int nbiases = 0;
2771  for (i = 0; i<nneurons; i++) {
2772  nbiases += neuronbias[i]==1;
2773  }
2774 
2775  int size = trainingSet.size() / ninputs;
2776  debug("npatterns: %d\n", size);
2777 
2778  Eigen::VectorXf oldActivations(time*nhiddens);
2779  oldActivations.setZero();
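 
  // oldActivations holds the hidden activations from the previous pattern
  // (one step back in time); the Jacobian computation below uses it instead
  // of the current activation whenever the presynaptic neuron is hidden.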
2780 
2781  Eigen::VectorXf err(size*n_outputsToTrain);
2782  Eigen::MatrixXf jacobian(size*n_outputsToTrain, nconnections );
2783 // Eigen::MatrixXf inv_error_weight(size*n_outputsToTrain,size*n_outputsToTrain);
2784 // Eigen::MatrixXf jt_we(size*n_outputsToTrain, nconnections );
2785  Eigen::MatrixXf jj(nconnections,nconnections);
2786 
2787  Eigen::VectorXf new_weights(nconnections);
2788  Eigen::VectorXf old_weights(nconnections);
2789  Eigen::VectorXf ww_err(nconnections);
2790 
2791 
2792 
2793  previousError = computeMeanSquaredError(trainingSet, desiredOutput);
2794  printf("Initial error: %f\n",previousError);
2795 
2796  /* if (previousError < maxError) {
2797  printf("Error already below threshold - Nothing to do.\n");
2798  return previousError;
2799  }
2800  */
2801 
2802 // int end = (maxIterations > 0 ? maxIterations : INFINITY);
2803  int end = maxIterations;
2804  for (cycles = 0; cycles<end; cycles++) {
2805 
2806  jacobian.setZero();
2807 
2808  extractWeightsFromNet(old_weights);
2809  debug("weights extracted\n");
2810 
2811  //*
2812  // Iterating over patterns. This can be easily parallelized using OpenMP
2813  //*
2814 
2815  for (pattern=0; pattern<size; pattern++) {
2816 
2817  debug("\n\n------------\n\n");
2818  debug("\tpattern: %d\n", pattern);
2819 
2820  //*
2821  // Forward computation
2822  //
2823  // Beware in case of parallel computation: this is a critical section
2824  //*
2825 
2826  for (i = 0; i<ninputs; i++) {
2827  setInput(i, trainingSet[pattern*ninputs + i]);
2828  }
2829  // debug("Before update (%s):\n",__PRETTY_FUNCTION__);
2830  // printAct();
2831  updateNet();
2832 
2833  // debug("after update:\n");
2834  // printAct();
2835 
2836 
2837  //*
2838  // Backward computation
2839  //*
2840 
2841  for(int m = noutputs-1; m>=0; m--) {
2842 
2843  debug("m: %d\n", m);
2844  if (!outputsToTrain[m])
2845  continue;
2846 
2847  int m_freep = m+ninputs+nhiddens;
2848 
2849  int col_idx = nconnections - 1;
2850  int row_idx = n_outputsToTrain*pattern-1;
2851 
2852  debug("row_idx: %d\n", row_idx);
2853  for (i = 0; i<=m; i++) {
2854  row_idx+= outputsToTrain[i];
2855  }
2856  debug("row_idx: %d\n", row_idx);
2857  //*
2858  // Computing error and jacobian relative to the output layer
2859  //*
2860 
2861  err[row_idx] = (desiredOutput[row_idx] - act[m_freep])*err_weights[m];
2862  delta = -derivative(m_freep, act[m_freep])*err_weights[m];
2863 
2864  //*
2865  // Iterating over output neurons
2866  //*
2867 
2868  for(i = noutputs-1; i>=0; i--) {
2869 
2870  debug("\toutput: %d\n", i);
2871  if (!outputsToTrain[i])
2872  continue;
2873  //*
2874  // Iterating over net blocks in order to get the neurons connected to the i-th output neuron
2875  //*
2876 
2877  for (b=net_nblocks-1; b>=0; b--) {
2878 
2879  //*
2880  // Check if current block is connected to i-th output
2881  //*
2882  // if (net_block[b][5]!=1 && inRange(i+ninputs+nhiddens,net_block[b][3],net_block[b][4]) ) {
2883  if (trainingHiddenBlock[net_block[b][3]][m]) {
2884 
2885  for (j=net_block[b][3]+net_block[b][4] -1; j>=net_block[b][3]; j--) {
2886  if (i==m) {
2887  jacobian(row_idx, col_idx--) = delta *act[j];
2888  debug("\t\tcol_idx: %d\n", col_idx+1);
2889  debug("\t\tjacobian(%d,%d) = %f * %f = %f\n", row_idx,col_idx+1,delta,act[j],delta*act[j]);
2890  }
2891  else {
2892  jacobian(row_idx, col_idx--) = 0;
2893  debug("\t\tcol_idx: %d\n", col_idx+1);
2894  debug("\t\tjacobian(%d,%d) = 0\n", row_idx,col_idx+1);
2895  }
2896 
2897 
2898  } //end loop over j
2899 
2900  } // end if
2901 
2902  } //end loop over b
2903 
2904  //*
2905  // Consider the derivative of the error with respect to the bias of the output neuron
2906  //*
2907  if (neuronbias[i+ninputs+nhiddens]) {
2908  debug("\t\tjacobian(%d,%d) = %f\n", row_idx,col_idx,(i==m ? delta : 0));
2909  jacobian(row_idx, col_idx--) = (i==m ? delta : 0);
2910  }
2911 
2912  } // end loop over i
2913 
2914  //*
2915  // Backpropagating the error over hidden neurons.
2916  // This follows the order of the blocks, from the last to the first.
2917  //
2918  // NOTE: this code assumes the net is a 3 layer net (input-hidden-output).
2919  // It will not work in case of nets with more than 1 hidden layer.
2920  //*
2921  debug("\nBackward computation: hidden layer\n");
2922 
2923 
2924  for (b=net_nblocks-1; b>=0; b--) {
2925 
2926  //*
2927  // If it's not a hidden block subject to training connected to the current (m-th) output
2928  //*
2929 
2930  // if ( !( net_block[b][0]==0 && net_block[b][5]==1 && isHidden(net_block[b][1]) && inRange(m_freep,net_block[b][3],net_block[b][4]) ) )
2931  debug("\ttrainingHiddenBlock[%d][%d]: %d\n", net_block[b][1],m,trainingHiddenBlock[net_block[b][1]][m]);
2932  if (net_block[b][0]!=0 || ! trainingHiddenBlock[net_block[b][1]][m] )
2933  continue;
2934 
2935  //*
2936  // Iterate over hidden neurons in the current block. The computation follows the chain rule, as in standard backpropagation.
2937  //*
2938 #if defined(__GNUC__) && defined(DEVELOPER_WARNINGS)
2939  #warning The trainLevembergMarquardtThroughTime method requires that all the connections to a particular hidden block are in the same net_block.
2940 #endif
2941 
2942  for(j = net_block[b][1]+net_block[b][2]-1; j>= net_block[b][1]; j--) {
2943 
2944  double delta_h = delta* getWeight(m_freep, j) * derivative(j, act[j]);
2945 
2946  for (int k = net_block[b][3] + net_block[b][4]-1; k>= net_block[b][3]; k--) {
2947 
2948  jacobian(row_idx, col_idx--) = delta_h * (isHidden(k) ? oldActivations[k-ninputs] : act[k]);
2949  debug("\t\tjacobian(%d,%d) = %f * %f = %f\n", row_idx,col_idx+1,delta_h,(isHidden(k) ? oldActivations[k-ninputs] : act[k]),delta_h*(isHidden(k) ? oldActivations[k-ninputs] : act[k]));
2950  }
2951 
2952  if (neuronbias[j]) {
2953  debug("\t\tjacobian(%d,%d) = %f\n", row_idx,col_idx,delta_h);
2954  jacobian(row_idx, col_idx--) = delta_h;
2955  }
2956 
2957 
2958  }
2959 
2960 
2961 
2962  } //end loop over b
2963 
2964  } // end loop over m (output neurons)
2965 
2966 
2967  //*
2968  // Updating the old activations
2969  //*
2970 
2971  for (int i = 0; i<nhiddens; i++) {
2972  oldActivations[i] = act[i+ninputs];
2973  }
2974 
2975 
2976 
2977 
2978  } //end pattern
2979 
2980 
2981 
2982  debug("\tAll rows analyzed\n");
2983 
2984 
2985 #ifdef BPDEBUG
2986  // std::cout<<"jacobian:\n"<<jacobian;
2987  // std::cout<<"\n\ndet(j^Tj) :\n"<<(jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).determinant() << "\n";
2988 
2989  // std::cout<<"\n\nerror: " << err.transpose() << "\n";
2990 #endif
2991 
2992  //*
2993  // new_weights = old_weights - (J^T J + lambda I)^-1 J^T e ;
2994  //*
2995 
2996  if (lambda > 100000000 || lambda < 0.000001) {
2997  lambda = 1;
2998  }
2999 
3000 
3001  // jt_we = jacobian.transpose();//*inv_error_weight;
3002  ww_err = jacobian.transpose()*err;
3003  jj = jacobian.transpose()*jacobian;
3004  // printf("det: %lf\n",jj.determinant());
3005 
3006  /* if (std::isnan((jacobian.transpose()*jacobian).determinant()) ) {
3007 
3008  printf("nan determinant : %f\n", (jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).determinant());
3009 
3010 
3011 
3012  FILE *fp = fopen("/Users/Manlio/Desktop/matrix.txt", "w");
3013 
3014  Eigen::MatrixXf pj(nconnections,nconnections);
3015 
3016  pj = jacobian.transpose()*jacobian;
3017 
3018 
3019  for(int i = 0;i<nconnections; i++)
3020  for(int j=0;j<nconnections; j++)
3021  if (!(std::isnormal(pj(i,j)) || pj(i,j)==0 ) ) {
3022  printf("(%d,%d) : %f\n",i,j,pj(i,j));
3023  }
3024 
3025 
3026 
3027  for (int a =0; a<nconnections; a++) {
3028  for (int b = 0; b<nconnections; b++) {
3029  fprintf(fp, "%f ", pj(a,b));
3030  }
3031  fprintf(fp, "\n");
3032  }
3033 
3034  fclose(fp);
3035  exit(0);
3036 
3037  }
3038  */
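  // Same Levenberg-Marquardt damping schedule as in trainLevembergMarquardt():
  // on failure restore the old weights and multiply lambda by 10, on success
  // divide it by 10.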
3039  for (int retry = 0; retry<6; retry++, lambda*=10) {
3040 
3041  debug("\tlambda: %f\n", lambda);
3042 
3043  // new_weights = old_weights - (jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).inverse()*jacobian.transpose()*err;
3044  new_weights = old_weights - (jj + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).ldlt().solve(ww_err);
3045 
3046  // printf("\tdet(j^Tj) : %f -- norm(j^T e) : %f\n",(jacobian.transpose()*jacobian).determinant(), (jacobian.transpose()*err).norm() );
3047  // printf("norm wb2: %f\n",new_weights.norm());
3048  // exit(0);
3049 
3050  // std::cout<<"\n\nnew_weights: " << new_weights.transpose() << "\n";
3051 
3052 
3053  importWeightsFromVector(new_weights);
3054 
3055  currentError = computeMeanSquaredError(trainingSet, desiredOutput);
3056 
3057  printf("iteration: %d err: %f lambda: %f\n",cycles,currentError,lambda);
3058 
3059  debug("currentError: %f\n",currentError);
3060 
3061  if (currentError <= maxError) {
      training = false;
3062  return currentError;
      }
3063  if ((new_weights-old_weights).norm() < 0.0001) {
3064  printf("Minimum gradient reached\n");
      training = false;
3065  return currentError;
3066  }
3067 
3068  if (currentError > previousError) {
3069  importWeightsFromVector(old_weights);
3070  }
3071  else {
3072  previousError = currentError;
3073  lambda/=10;
3074  break;
3075  }
3076 
3077  }
3078  // exit(1);
3079  }
3080 
3081  training = false;
3082  return currentError;
3083 
3084  }
3085 
3086  int Evonet::importWeightsFromMATLABFile(char *path) {
3087 
3088  //*
3089  // NOTE: This code has been written just to work. It assumes the net has
3090  // only two blocks to train: one connecting inputs with hidden neurons
3091  // and the other one connecting hidden with outputs.
3092  //*
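 //*
 // Expected file layout, as inferred from the loops below (one value per
 // line): for each trained block, first the block's weights, with the
 // presynaptic neuron in the outer loop and the postsynaptic neuron in the
 // inner loop, then the biases of the block's postsynaptic neurons.
 //*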
3093 
3094  FILE *fp = fopen(path, "r");
3095 
3096  if (!fp)
3097  return -1 ;
3098 
3099  int biasptr = 0;
3100  int wptr = 0;
3101 
3102  for (int i = 0; i<nneurons; i++) {
3103  wptr += (neuronbias[i]==1);
3104  }
3105 
3106 
3107  int b;
3108 
3109  for (b = 0; b<net_nblocks; b++) {
3110  if (net_block[b][5]==1) {
3111  for (int i = 0; i<net_block[b][4]; i++) {
3112  for (int j = 0; j<net_block[b][2]; j++) {
3113  fscanf(fp, "%f", &freep[ wptr+i+j*net_block[b][4] ]);
3114 // printf("setting weight to %d from %d in freep[%d] (%f)\n",j+net_block[b][1],i+net_block[b][3],wptr+i+j*net_block[b][4],freep[wptr+i+j*net_block[b][4]]);
3115  }
3116  }
3117 
3118  wptr+=net_block[b][2]*net_block[b][4];
3119 
3120  biasptr=0;
3121 
3122  for (int j=0; j<net_block[b][1]; j++) {
3123  if (neuronbias[j]) {
3124  biasptr++;
3125  }
3126  }
3127 
3128  for (int i =0; i<net_block[b][2]; i++) {
3129  fscanf(fp, "%f", &freep[biasptr++]);
3130 // printf("setting bias of %d in freep[%d] (%f)\n",i+net_block[b][1],biasptr-1,freep[biasptr-1]);
3131  }
3132  }
3133  else if (net_block[b][0]==0) {
3134  wptr+= net_block[b][2]*net_block[b][4];
3135 /* for (int i = net_block[b][3]; i<net_block[b][3]+ net_block[b][4]; i++) {
3136  if (neuronbias[i]) {
3137  biasptr++;
3138  }
3139  }
3140 */
3141  }
3142  }
3143 
3144  fclose(fp);
3145 
3146  return 0;
3147  }
3148 
3149  int Evonet::exportWeightsToMATLABFile(char *path) {
3150 
3151  //*
3152  // NOTE: This code has been written just to work. It assumes the net has
3153  // only two blocks to train: one connecting inputs with hidden neurons
3154  // and the other one connecting hidden with outputs.
3155  //*
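 //*
 // The layout written here mirrors the one read by
 // importWeightsFromMATLABFile() above.
 //*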
3156 
3157  FILE *fp = fopen(path, "w");
3158 
3159  if (!fp)
3160  return -1 ;
3161 
3162  int biasptr = 0;
3163  int wptr = 0;
3164 
3165  for (int i = 0; i<nneurons; i++){
3166  wptr += (neuronbias[i]==1);
3167  }
3168 
3169 
3170  int b;
3171 
3172  for (b = 0; b<net_nblocks; b++) {
3173  if (net_block[b][5]==1) {
3174  for (int i = 0; i<net_block[b][4]; i++) {
3175  for (int j = 0; j<net_block[b][2]; j++) {
3176  fprintf(fp, "%f\n", freep[ wptr+i+j*net_block[b][4] ]);
3177 // printf("setting weight to %d from %d in freep[%d] (%f)\n",j+net_block[b][1],i+net_block[b][3],wptr+i+j*net_block[b][4],freep[wptr+i+j*net_block[b][4]]);
3178  }
3179  }
3180 
3181  wptr+=net_block[b][2]*net_block[b][4];
3182 
3183  biasptr=0;
3184 
3185  for (int j=0; j<net_block[b][1]; j++) {
3186  if (neuronbias[j]) {
3187  biasptr++;
3188  }
3189  }
3190 
3191  for (int i =0; i<net_block[b][2]; i++) {
3192  fprintf(fp, "%f\n", freep[biasptr++]);
3193 // printf("setting bias of %d in freep[%d] (%f)\n",i+net_block[b][1],biasptr-1,freep[biasptr-1]);
3194  }
3195  }
3196  else if (net_block[b][0]==0) {
3197  wptr+= net_block[b][2]*net_block[b][4];
3198  /* for (int i = net_block[b][3]; i<net_block[b][3]+ net_block[b][4]; i++) {
3199  if (neuronbias[i]) {
3200  biasptr++;
3201  }
3202  }
3203  */
3204  }
3205  }
3206 
3207  fclose(fp);
3208 
3209  return 0;
3210  }
3211 
3212  void Evonet::setNeckReflex() {
3213  // Sets the weights of the reflex
3214  //
3215  // Relies on some assumptions about the structure of the network. Not general!
3216  //
3217 
3218  int p = 0;
3219  for (int i = 0; i<nneurons; i++) {
3220  p += neuronbias[i]==1;
3221  }
3222 
3223  freep[p++] = 0;
3224  freep[p++] = 5;
3225  freep[p++] = -5;
3226  freep[p] = 0;
3227 
3228 // printf("%s: p: %d\n",__PRETTY_FUNCTION__,p);
3229 // freep[14] = 0;
3230 // freep[15] = 0;
3231 
3232  }
3233 
3234 } // end namespace farsa
3235 
3236 
3237 // All the stuff below is to restore the warning state on Windows
3238 #if defined(_MSC_VER)
3239  #pragma warning(pop)
3240 #endif
3241 
3242 
3243 
3244 /*
3245  for (i = net_block[b][3]; i<net_block[b][3]+net_block[b][4]; i++) {
3246  err[i] = 0;
3247 
3248  for (j = net_block[b][1]; j<net_block[b][1]+net_block[b][2]; j++) {
3249  err[i] += freep[ngains+nbias+]
3250  }
3251  }
3252  for (i = net_block[b][3]; i < net_block[b][3] + net_block[b][4]; i++) {
3253  if (t >= ninputs) {
3254  float dp_rate = delta[t] * rate;
3255  // calculate the delta for the lower neuron
3256  delta[i] += delta[t] * *p;
3257  // modify the weight
3258  *p += act[i] * dp_rate;
3259  }
3260  p++;
3261  }
3262 
3263  */