Okay. This is the code for the three classes: neuron, layer and perceptron. I haven't finished the perceptron class yet, and it is likely that I'll have to tweak the layer class too. I've implemented operator[], but I'm not sure whether my implementation makes sense.

Please let me know which areas of the code I can improve — thanks.

Code:

// neuron.h
////////////////////////////////
// Include guard: identifiers containing a leading double underscore are
// reserved for the implementation, so avoid __NEURON_H_.
#ifndef NEURON_H_
#define NEURON_H_

// A single perceptron node: keeps a private copy of its input vector plus
// one weight per input, and produces a binary (0/1) threshold output.
class neuron
{
public:
    // Weight initialisation mode: all ones (kFIXED) or random (kVARIABLE).
    enum typeOfWeight {kFIXED, kVARIABLE};
    neuron();
    neuron(double *pInputs, typeOfWeight type, int numberOfInputs);
    ~neuron();
    // (Re)initialise the node: copies the inputs and initialises the weights.
    void setNode (double *pInputs, typeOfWeight type, int numberOfInputs);
    // Copy a new input vector (size must match the current connection count).
    void setInputs(double *pInputs);
    void setNumOfInputs(int numInConnections);
    void setWeights(typeOfWeight type);
    // Weighted sum of inputs passed through a hard threshold: 1 if > 0, else 0.
    double getOutput(void);
    void updateWeights(void);
private:
    double *pmInputs;       // owned copy of the input values (new[])
    double *pmWeights;      // owned array of connection weights (new[])
    double mOutput;         // last computed output (0 or 1)
    int mNumInConnections;  // number of inputs / weights
    // Uniform pseudo-random weight in [min, max].
    double rndWeight(int const &min, int const &max);
};
// Fix: the original had `#endif \\ __NEURON_H_` — backslashes are not a
// comment in C++, so that line did not compile.
#endif // NEURON_H_

Code:

// neuron.cpp
////////////////////////////////
#include "neuron.h"
#include <iostream>
#include <stdlib.h> // required to call rand() function
#include <time.h> // used to return the current time - used
// as seed for the rand() function
// Default constructor: leave the node empty but in a destructible state.
// Fix: the original left every member uninitialised, so destroying a
// default-constructed neuron ran delete[] on garbage pointers (UB).
neuron::neuron()
{
    pmInputs = 0;
    pmWeights = 0;
    mOutput = 0;
    mNumInConnections = 0;
}
// Construct a fully initialised node: copy `numberOfInputs` values from
// pInputs and create one weight per input according to `type`.
neuron::neuron(double *pInputs, typeOfWeight type, int numberOfInputs)
{
    mNumInConnections = numberOfInputs;
    pmInputs = new double[mNumInConnections];
    pmWeights = new double[mNumInConnections];

    // Take a private copy of the caller's input vector.
    for (int idx = 0; idx < mNumInConnections; ++idx)
        pmInputs[idx] = pInputs[idx];

    // Initialise the weights (fixed or random).
    setWeights(type);
}
// Release the arrays allocated with new[] and null the pointers so the
// object is left in a harmless state.
neuron::~neuron()
{
    delete[] pmInputs;
    delete[] pmWeights;
    pmInputs = 0;
    pmWeights = 0;
}
// (Re)initialise the node: copy the inputs and initialise the weights.
// Fix: the original always allocated fresh arrays without releasing the
// old ones, leaking memory on every call after the first.
// NOTE(review): assumes pmInputs/pmWeights are either null or arrays
// obtained with new[] — make sure the default constructor nulls them.
void neuron::setNode (double *pInputs, typeOfWeight type, int numberOfInputs)
{
    // Release any previous buffers before resizing.
    delete[] pmInputs;
    delete[] pmWeights;

    mNumInConnections = numberOfInputs;
    pmInputs = new double[mNumInConnections];
    pmWeights = new double[mNumInConnections];

    // Copy the caller's input vector.
    for (int i = 0; i < mNumInConnections; i++)
        pmInputs[i] = pInputs[i];

    // Initialise the weights (fixed or random).
    setWeights(type);
}
// Copy a fresh input vector into the node.
// Fix: the original allocated a brand-new array on every call and leaked
// the previous one; the buffer sized by setNode() can simply be reused.
// pInputs must hold at least mNumInConnections values.
void neuron::setInputs(double *pInputs)
{
    for (int i = 0; i < mNumInConnections; i++)
        pmInputs[i] = pInputs[i];
}
// Record the number of input connections.
// NOTE(review): this only updates the count — pmInputs/pmWeights are NOT
// reallocated here, so a later setInputs()/setWeights() call will read or
// write past the old arrays if the count grew. Prefer setNode(), which
// sizes the arrays to match.
void neuron::setNumOfInputs(int numInConnections)
{
mNumInConnections = numInConnections;
}
// Initialise every connection weight according to the requested mode:
// kFIXED sets all weights to 1, kVARIABLE draws each from [0, 1].
void neuron::setWeights(typeOfWeight type)
{
    for (int i = 0; i < mNumInConnections; i++)
    {
        switch (type)
        {
        case kFIXED:
            pmWeights[i] = 1;
            break;
        case kVARIABLE:
            pmWeights[i] = rndWeight(0, 1);
            break;
        }
    }
}
double neuron::getOutput(void)
{
int i = 0;
double sum = 0;
double input = 0;
double weight = 0;
// calculate the sum of inputs x weights
for (i = 0; i < mNumInConnections; i++)
{
//input = *(pmInputs + i);
//weight = *(pmWeights + i);
input = pmInputs[i];
weight = pmWeights[i];
sum = sum + (input * weight);
}
// assess the neuron output
if (sum > 0)
mOutput = 1;
if (sum <= 0)
mOutput = 0;
// return the result of the calculation
return mOutput;
}
// Return a uniformly distributed pseudo-random weight in [min, max].
// RAND_MAX is defined in <stdlib.h>; the division is done in double
// precision to avoid integer truncation.
double neuron::rndWeight(int const &min, int const &max)
{
    double unit = double(rand()) / double(RAND_MAX); // in [0, 1]
    return min + unit * double(max - min);
}

Code:

// layer.h
////////////////////////////////
// Include guard: identifiers containing a leading double underscore are
// reserved for the implementation, so avoid __LAYER_H_.
#ifndef LAYER_H_
#define LAYER_H_

#include "neuron.h"

// A layer of identical neurons that are all wired to the same inputs.
class layer
{
public:
    layer();
    // Build `numOfNodes` neurons, each reading `numberOfInputs` values
    // from `inputs` and initialised with weights of the given type.
    layer(int numOfNodes, int numberOfInputs, double *inputs, neuron::typeOfWeight type);
    ~layer();
    void setNodeInputs(double *pInputs, int nodeIndex);
    void setNumOfNodes(int numNodes);
    double getNodeOutput(int nodeIndex);
    // Returning a reference lets callers use the '.' operator directly
    // (a pointer return would force '->').
    neuron &operator[](int index);
private:
    int mNumOfNodes;    // number of neurons in the layer
    int mNumOfInputs;   // inputs per neuron
    double *pmInputs;   // non-owning pointer to the shared input vector
    neuron *pmNeurons;  // owned array of neurons (new[])
};
#endif // LAYER_H_

Code:

// layer.cpp
////////////////////////////////
#include "layer.h"
#include <iostream>
// Default constructor.
// Fix: initialise every member so the destructor's delete[] is safe on a
// default-constructed layer (the original left pmNeurons uninitialised).
layer::layer()
{
    mNumOfNodes = 0;
    mNumOfInputs = 0;
    pmInputs = 0;
    pmNeurons = 0;
}
// Build `numOfNodes` neurons that all read from the same `inputs` vector.
// The layer keeps only a pointer to `inputs` (it does not copy or own
// it); each neuron, however, takes its own copy via setNode().
layer::layer(int numOfNodes, int numberOfInputs, double *inputs, neuron::typeOfWeight type)
{
    mNumOfNodes = numOfNodes;
    mNumOfInputs = numberOfInputs;
    pmInputs = inputs;
    pmNeurons = new neuron[mNumOfNodes]();

    for (int node = 0; node < mNumOfNodes; node++)
        pmNeurons[node].setNode(pmInputs, type, mNumOfInputs);
}
// Free the neuron array; pmInputs is not owned by the layer, so it is
// deliberately left alone.
layer::~layer()
{
    delete[] pmNeurons;
    pmNeurons = 0;
}
void layer::setNodeInputs(double *pInputs, int nodeIndex)
{
}
// Resize the layer to `numNodes` neurons.
// Fixes: the original was missing the ';' after new[] (a compile error)
// and leaked the previously allocated neuron array.
// NOTE(review): assumes pmNeurons is null or an array from new[] — make
// sure the default constructor nulls it.
void layer::setNumOfNodes(int numNodes)
{
    delete[] pmNeurons; // release the old array (no-op when null)
    mNumOfNodes = numNodes;
    pmNeurons = new neuron[mNumOfNodes];
}
// Bounds-checked element access.
// Fixes: the original tested `index > mNumOfNodes` (off by one, and
// negative indices slipped through), and the error branch fell off the
// end of the function without returning anything — undefined behaviour.
neuron& layer::operator[](int index)
{
    if (index < 0 || index >= mNumOfNodes)
    {
        std::cout << "ERROR...! neuron.operator[]\n\n";
        // A reference must refer to something, so fall back to the first
        // neuron. Throwing std::out_of_range would be the stricter option.
        index = 0;
    }
    return pmNeurons[index];
}

Code:

// perceptron.h
////////////////////////////////
// Include guard: identifiers containing a leading double underscore are
// reserved for the implementation, so avoid __PERCEPTRON_H_.
#ifndef PERCEPTRON_H_
#define PERCEPTRON_H_

#include "layer.h"
#include <queue>

// A two-layer perceptron network (input layer + output layer).
class perceptron
{
public:
    perceptron( int numLayers = 2,
                int numNodesLayer1 = 1,
                int numNodesLayer2 = 1);
    ~perceptron();
    // pair is a queue containing inputs and their targets.
    // inputs and targets can be part of a structure.
    // inputs are arrays of double.
    // targets are arrays of binary numbers (0s or 1s).
    // NOTE(review): `queue` by itself is not a type — this must become
    // std::queue<SomeTrainingPattern> (with a pattern struct defined)
    // before it will compile.
    void training(queue pair);
    layer &operator[](int index);
private:
    int mNumOfLayers;          // number of layers in the network
    layer *pmLayers;           // owned array of layers (new[])
    int mNumTrainingPatterns;  // number of training patterns seen
};
#endif // PERCEPTRON_H_

Code:

// perceptron.cpp
////////////////////////////////
#include "perceptron.h"
#include <iostream>
// Build the network's layers.
// Fixes: the original declared a LOCAL `layer *pmLayers`, shadowing the
// member (which stayed uninitialised and was then delete[]d by the
// destructor — UB), and called setNumOfNodes with undeclared variables
// (pLayer1Inputs, i) instead of the node counts.
// NOTE(review): the body configures exactly two layers, so it assumes
// numLayers >= 2 (the header defaults it to 2) — confirm intent.
perceptron::perceptron(int numLayers,
                       int numNodesLayer1,
                       int numNodesLayer2)
{
    mNumOfLayers = numLayers;
    mNumTrainingPatterns = 0;
    pmLayers = new layer[mNumOfLayers]; // assign the MEMBER, not a local
    pmLayers[0].setNumOfNodes(numNodesLayer1);
    pmLayers[1].setNumOfNodes(numNodesLayer2);
}
// Release the layer array owned by the network.
perceptron::~perceptron()
{
    delete[] pmLayers;
    pmLayers = 0;
}
// pair is a queue containing inputs and their targets.
// inputs and targets can be part of a structure.
// inputs are arrays of double.
// targets are array od binary numbers (0s or 1s).
perceptron::training(queue pair)
{
int iterator = 0;
int count = 0;
// initialise network's size and initial weights
// Start the training loop
// populate the network input layer with the next pattern to learn.
// calculate the input layer's nodes output. Note that the nodes in the
// input layer are not processing nodes. Hence, nodes.output = node.input.
// calculate the output of the nodes in the output layer.
// update the weight of the connections between the nodes in the input
// layer and the output layer.
// check the network average error
// calculate the error for the pattern used for training
// if the network has iterated through all the objects in the pool:
// 1. Calculate the network average error for all the patterns.
// 2. Store the average in a vector (i.e. dynamic array) so
// so it can be used to analyse the performance of the network.
// 3. If average <= threshold exit training
// else continue training
}
// Bounds-checked layer access.
// Fixes: `index > mNumOfLayers` was off by one (and negative indices
// slipped through), and the error branch returned nothing — undefined
// behaviour for a reference-returning function.
layer& perceptron::operator[](int index)
{
    if (index < 0 || index >= mNumOfLayers)
    {
        std::cout << "ERROR...! layer.operator[]\n\n";
        // A reference must refer to something, so fall back to the first
        // layer. Throwing std::out_of_range would be the stricter option.
        index = 0;
    }
    return pmLayers[index];
}