/* Elman recurrent neural network, trained in parallel with pthreads. */
#include <math.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define INPUT_NEURONS 256
#define HIDDEN_NEURONS 150
#define OUTPUT_NEURONS 10
#define CONTEXT_NEURONS 150
#define no_of_clients 10
#define no_trng_vectors 1593
// Hidden units computed by each client thread (150/10 = 15 here).
// NOTE(review): assumes HIDDEN_NEURONS is divisible by no_of_clients;
// a remainder would leave the last hidden units uncomputed.
int HIDDENNEURONS = HIDDEN_NEURONS/no_of_clients;
// Activations.
double inputs[INPUT_NEURONS];        // current sample's input vector
double hidden[HIDDEN_NEURONS]={0};   // hidden-layer activations (written by client threads)
double target[OUTPUT_NEURONS];       // desired outputs for the current sample
double actual[OUTPUT_NEURONS];       // network outputs for the current sample
double context[CONTEXT_NEURONS]={0}; // Elman context units (previous hidden state)
// Full training set, loaded from "read.txt" in main().
static double outputs_from_file[no_trng_vectors][OUTPUT_NEURONS];
static double inputs_from_file[no_trng_vectors][INPUT_NEURONS];
// Per-sample weight deltas and raw output errors, accumulated by
// storeError() and applied by backPropagate().
// NOTE(review): batch_deltawho's middle dimension is INPUT_NEURONS (256) but
// it is indexed by a hidden-unit index elsewhere; in-bounds (150 < 256) but
// the declaration probably meant HIDDEN_NEURONS.
double batch_deltawho[no_trng_vectors][INPUT_NEURONS][OUTPUT_NEURONS];
double batch_deltawih[no_trng_vectors][INPUT_NEURONS][HIDDEN_NEURONS];
double batch_erro[no_trng_vectors][OUTPUT_NEURONS];
// Argument bundle handed to each client thread.
typedef struct{
int cli_index; // which slice of the hidden layer this client computes
int sam;       // index of the current training sample
}cli_var;
double temp = 0.0;  // previous epoch's mean squared error
int itteration;     // number of completed batch weight updates
int stop = 0;       // set to 1 once the error drops below threshold
double LEARN_RATE = 0.2; // Rho.
//-------------------------------------------------------------
// Input to Hidden Weights (with Bias es).
//double wih[INPUT_NEURONS][HIDDEN_NEURONS]={{0.840188,0.394383,0.783099},{0.798440,0.911647,0.197551},{0.335223,0.768230,0.277775},{0.553970,0.477397,0.628871},{0.364784,0.513401,0.952230},{0.916195,0.635712,0.717297}
// Context to Hidden Weight (with Biases).
//double wch[CONTEXT_NEURONS][HIDDEN_NEURONS]={{0.141603,0.606969,0.016301},{0.242887,0.137232,0.804177},{0.156679,0.400944,0.129790}
// Hidden to Output Weights (with Biases).
//double who[HIDDEN_NEURONS][OUTPUT_NEURONS]={{0.108809,0.998925,0.218257,0.512932,0.839112,0.612640},{0.296032,0.637552,0.524287,0.493583,0.972775,0.292517},{0.771358,0.526745,0.769914,0.400229,0.891529,0.283315}};
// Weight matrices, randomly initialized in main().
double wih[INPUT_NEURONS][HIDDEN_NEURONS];   // input   -> hidden
double wch[CONTEXT_NEURONS][HIDDEN_NEURONS]; // context -> hidden
double who[HIDDEN_NEURONS][OUTPUT_NEURONS];  // hidden  -> output
// Hidden to Context Weights (no Biases).
double whc[OUTPUT_NEURONS][CONTEXT_NEURONS]; // fixed at 0.5 by assignRandomWeightsHC()
// Unit errors.
double erro[OUTPUT_NEURONS]; // output-layer error terms
double errh[HIDDEN_NEURONS]; // hidden-layer error terms
int NUM_THREADS=no_of_clients+1; // 10 client threads plus one server thread
pthread_t threads[11]; // sized for NUM_THREADS
int a[2]={0,0}; // leftover sync flags; only referenced in commented-out code
// Forward declarations.
double fRand(double,double);
double sigmoid(double);
double sigmoidDerivative(double);
void backPropagate();
void assignRandomWeightsIH();
void assignRandomWeightsCH();
void assignRandomWeightsHC();
void assignRandomWeightsHO();
void storeError(int);
void elmanNetwork();
void *server(void *);
void *client(void *);
/*
 * Program entry point: reads the learning rate and the training data from
 * "read.txt", randomly initializes the network weights, trains the Elman
 * network, and reports the wall-clock time taken.
 *
 * Returns 0 on success, 1 if the input file cannot be opened or is malformed.
 */
int main(void)
{
    struct timeval tv1, tv2;
    unsigned long long startingtime, endingtime;

    gettimeofday(&tv1, NULL);
    startingtime = tv1.tv_usec + tv1.tv_sec * 1000000ULL;
    printf("starting time in seconds:");
    printf("%llu", startingtime);

    FILE *fp = fopen("read.txt", "r");
    if (fp == NULL) { // fail fast instead of crashing inside fscanf(NULL, ...)
        fprintf(stderr, "\ncannot open read.txt\n");
        return 1;
    }
    if (fscanf(fp, "%lf", &LEARN_RATE) != 1) {
        fprintf(stderr, "\nfailed to read learning rate from read.txt\n");
        fclose(fp);
        return 1;
    }

    /* NOTE(review): noofinp/noofout are never reset per training vector, so
     * only the first INPUT_NEURONS inputs and OUTPUT_NEURONS outputs are
     * read from the file (i.e. only vector 0 is populated; the remaining
     * vectors stay zero-initialized). Preserved as-is -- confirm against
     * the intended format of read.txt. */
    int i, l, k;
    int noofinp = 0, noofout = 0;
    for (i = 0; i < no_trng_vectors; i++) {
        for (l = 0; l < INPUT_NEURONS; l++) {
            if (noofinp != 256) {
                if (fscanf(fp, "%lf", &inputs_from_file[i][l]) != 1)
                    break; // stop on short or corrupt file
                noofinp++;
            }
        }
        for (k = 0; k < OUTPUT_NEURONS; k++) {
            if (noofout != 10) {
                if (fscanf(fp, "%lf", &outputs_from_file[i][k]) != 1)
                    break;
                noofout++;
            }
        }
    }
    fclose(fp);

    // Weight initialization (NOTE(review): rand() is never seeded, so every
    // run uses the same pseudo-random weights -- possibly intentional for
    // reproducibility).
    assignRandomWeightsHC();
    assignRandomWeightsHO();
    assignRandomWeightsIH();
    assignRandomWeightsCH();

    elmanNetwork();
    printf("\nnetwork is trained at %lf", temp);

    gettimeofday(&tv2, NULL);
    endingtime = tv2.tv_usec + tv2.tv_sec * 1000000ULL;
    printf("\nendingtime:%llu", endingtime);
    if (startingtime < endingtime)
        printf("\ntime taken in parallel code using pthreads:%llu\n", endingtime - startingtime);
    else
        printf("\ntime taken in parallel code using pthreads:%llu\n", startingtime - endingtime);
    return 0;
}
/*
 * Trains the Elman network: repeatedly presents every training vector until
 * the server thread sets the global 'stop' flag (mean squared error below
 * threshold). For each sample, the inputs/targets are copied into the shared
 * buffers and a server thread is spawned (and joined); the server in turn
 * fans the hidden-layer computation out to the client threads.
 */
void elmanNetwork()
{
    int j, inp, out;

    while (!stop) {
        for (j = 0; j < no_trng_vectors; j++) {
            // Load the j-th training sample into the shared buffers the
            // worker threads read.
            for (inp = 0; inp < INPUT_NEURONS; inp++)
                inputs[inp] = inputs_from_file[j][inp];
            for (out = 0; out < OUTPUT_NEURONS; out++)
                target[out] = outputs_from_file[j][out];
            // Pass the sample index through the void* argument; the intptr_t
            // hop avoids the int-to-pointer conversion warning on LP64.
            pthread_create(&threads[0], NULL, server, (void *)(intptr_t)j);
            pthread_join(threads[0], NULL);
        }
    }
}
void *client(void * index)
{
cli_var *cli_var2;
cli_var2=(cli_var*)index;
int con;
double sum = 0.0;
int hid,inp,out;
//printf("\n inside client,index %d", cli_var2->cli_index);
int sample=cli_var2->sam;
/*for(inp=0;inp<INPUT_NEURONS;inp++)
{
inputs[inp]=inputs_from_file[sample][inp];
//printf("-------%lf",inputs[inp]);
}
for(out=0;out<OUTPUT_NEURONS;out++)
{
target[out]=outputs_from_file[sample][out];
}*/
// calculate inputs to hidden layer using wih & wch
// part of feed forward
for(hid = (cli_var2->cli_index)*HIDDENNEURONS; hid < ((cli_var2->cli_index)+1)*HIDDENNEURONS; hid++)
{
sum = 0.0;
for(inp = 0; inp < INPUT_NEURONS; inp++) // from input to hidden...
{
sum += inputs[inp] * wih[inp][hid];
//printf("\n sum: %lf",sum);
} // inp
for(con = 0; con < CONTEXT_NEURONS; con++) // from context to hidden...
{
sum += context[con] * wch[con][hid];
//printf("\n sum with con: %lf",sum);
} // con
//sum += wih[INPUT_NEURONS][hid]; // Add in bias.
//sum += wch[CONTEXT_NEURONS][hid];
hidden[hid] = sigmoid(sum);
} // hid
//a[cli_var2->cli_index]=1;
}
/*
 * Initialize every input-to-hidden weight with a random value in [0, 1].
 */
void assignRandomWeightsIH()
{
    int row, col;
    for (row = 0; row < INPUT_NEURONS; row++) {
        for (col = 0; col < HIDDEN_NEURONS; col++) {
            wih[row][col] = fRand(0, 1);
        }
    }
}
/*
 * Initialize every context-to-hidden weight with a random value in [0, 1].
 */
void assignRandomWeightsCH()
{
    int row, col;
    for (row = 0; row < CONTEXT_NEURONS; row++) {
        for (col = 0; col < HIDDEN_NEURONS; col++) {
            wch[row][col] = fRand(0, 1);
        }
    }
}
void *server(void *index)
{
double err;
double sum;
cli_var cli_val1;
int sample=(int)index;
int out,hid,con,count,inp;
int i,l,j;
cli_val1.sam=sample;
//printf("\n inside server,index %d", cli_var2->cli_index);
//printf("\n inside server,sample %d", sample);
for(i=1,cli_val1.cli_index=0;i<NUM_THREADS;i++,cli_val1.cli_index++)
{
//printf("client index %d",cli_val1.cli_index);
pthread_create(&threads[i],NULL,client,(void *)&cli_val1);
//pthread_join(threads[i],NULL);
}
pthread_join(threads[i-1],NULL);
/*if(!(a[0]&&a[1]))
{
printf("sleeping");
sleep(10);
}*/
//while(1){
//while(sample!=no_trng_vectors){
// Train the network.
//itteration++;
// Calculate the hidden to output layer.
// part of feed forward
for(hid = 0; hid < HIDDEN_NEURONS; hid++){
//printf("hidden: %lf",hidden[hid]);
}
for(out = 0; out < OUTPUT_NEURONS; out++)
{
sum = 0.0;
for(hid = 0; hid < HIDDEN_NEURONS; hid++)
{
sum += hidden[hid] * who[hid][out];
} // hid
//sum += who[HIDDEN_NEURONS][out]; // Add in bias.
actual[out] = sigmoid(sum);
} // out
// Copy outputs of the hidden to context layer.
for(con = 0; con < CONTEXT_NEURONS; con++)
{
context[con] = hidden[con];
}
storeError(sample);
//sample++;
//}
if(sample==no_trng_vectors-1){
err = 0.0;
for(j=0; j < no_trng_vectors; j++){
for(i = 0; i < OUTPUT_NEURONS; i++)
{
//err += Math.sqrt(target_vector[i] - output_vector[i]);
err += batch_erro[j][i]*batch_erro[j][i];
}
}
err = 0.5 * err/no_trng_vectors;
//printf("err:%lf",err);
if(temp>0.0){
if(temp-err>0.2){
LEARN_RATE=LEARN_RATE-LEARN_RATE*0.1;
}else if(temp-err<0.2){
LEARN_RATE=LEARN_RATE+LEARN_RATE*0.1;
}
}
temp=err;
if(err<0.05)
{
stop = 1;
//printf(".....%lf....",err);
//break;
}
else{
backPropagate();
//sample=0;
itteration++;
return;
}
}
//}
//printf("itterations :%d",itteration);
return;
}
/*
 * Set the hidden-to-context (recurrent) weights. Despite the function name,
 * these weights are not random: every one is fixed at 0.5.
 */
void assignRandomWeightsHC()
{
    int o, c;
    for (o = 0; o < OUTPUT_NEURONS; o++)
        for (c = 0; c < CONTEXT_NEURONS; c++)
            whc[o][c] = 0.5;
}
/*
 * Initialize every hidden-to-output weight with a random value in [0, 1].
 */
void assignRandomWeightsHO()
{
    int row, col;
    for (row = 0; row < HIDDEN_NEURONS; row++) {
        for (col = 0; col < OUTPUT_NEURONS; col++) {
            who[row][col] = fRand(0, 1);
        }
    }
}
/*
 * Records the error and weight deltas for one training sample. This is the
 * "accumulate" phase of batch backpropagation; the actual weight update
 * happens later in backPropagate().
 *
 * trng_vector_index: which training vector's slot of the batch buffers to
 * fill. Reads the globals target/actual/hidden/inputs/who; writes
 * batch_erro, erro, errh, batch_deltawho, batch_deltawih.
 */
void storeError(int trng_vector_index)
{
    int out, hid, inp;

    // Output-layer error: the raw difference is kept for the MSE
    // computation; the derivative-scaled term drives the weight deltas.
    for (out = 0; out < OUTPUT_NEURONS; out++) {
        batch_erro[trng_vector_index][out] = (target[out] - actual[out]);
        erro[out] = (target[out] - actual[out]) * sigmoidDerivative(actual[out]);
    }

    // Hidden-layer error, back-propagated through the hidden->output weights.
    for (hid = 0; hid < HIDDEN_NEURONS; hid++) {
        errh[hid] = 0.0;
        for (out = 0; out < OUTPUT_NEURONS; out++)
            errh[hid] += erro[out] * who[hid][out];
        errh[hid] *= sigmoidDerivative(hidden[hid]);
    }

    // Accumulate the per-sample weight deltas.
    // NOTE(review): these buffers are never reset between epochs, so deltas
    // accumulate across epochs -- confirm this is intended.
    // NOTE(review): batch_deltawho's middle dimension is declared as
    // INPUT_NEURONS but indexed by hid (< HIDDEN_NEURONS); in-bounds, but
    // the declaration probably meant HIDDEN_NEURONS.
    for (out = 0; out < OUTPUT_NEURONS; out++)
        for (hid = 0; hid < HIDDEN_NEURONS; hid++)
            batch_deltawho[trng_vector_index][hid][out] += (LEARN_RATE * erro[out] * hidden[hid]);

    for (hid = 0; hid < HIDDEN_NEURONS; hid++)
        for (inp = 0; inp < INPUT_NEURONS; inp++)
            batch_deltawih[trng_vector_index][inp][hid] += (LEARN_RATE * errh[hid] * inputs[inp]);
}
/*
 * Applies the batch deltas accumulated by storeError() to the weights:
 * every sample's delta is added and the resulting weight is divided by the
 * number of training vectors.
 *
 * NOTE(review): dividing the WHOLE weight (not just the summed delta) by
 * no_trng_vectors shrinks the existing weight as well -- preserved exactly
 * as in the original, but confirm this is the intended averaging scheme.
 */
void backPropagate()
{
    int out, hid, inp;
    int v;

    // NOTE(review): the erro/errh recomputation below updates the global
    // error arrays but its results are NOT used by the weight updates that
    // follow (those read the batch_delta buffers). Preserved because erro
    // and errh are globals also written/read by storeError().
    for (out = 0; out < OUTPUT_NEURONS; out++)
        erro[out] = (target[out] - actual[out]) * sigmoidDerivative(actual[out]);

    for (hid = 0; hid < HIDDEN_NEURONS; hid++) {
        errh[hid] = 0.0;
        for (out = 0; out < OUTPUT_NEURONS; out++)
            errh[hid] += erro[out] * who[hid][out];
        errh[hid] *= sigmoidDerivative(hidden[hid]);
    }

    // Hidden -> output weights: add every sample's delta, then divide.
    for (out = 0; out < OUTPUT_NEURONS; out++) {
        for (hid = 0; hid < HIDDEN_NEURONS; hid++) {
            for (v = 0; v < no_trng_vectors; v++)
                who[hid][out] += batch_deltawho[v][hid][out];
            who[hid][out] = who[hid][out] / no_trng_vectors;
        }
    }

    // Input -> hidden weights, same scheme.
    for (hid = 0; hid < HIDDEN_NEURONS; hid++) {
        for (inp = 0; inp < INPUT_NEURONS; inp++) {
            for (v = 0; v < no_trng_vectors; v++)
                wih[inp][hid] += batch_deltawih[v][inp][hid];
            wih[inp][hid] = wih[inp][hid] / no_trng_vectors;
        }
    }
}
/*
 * Returns a pseudo-random double uniformly distributed in [fMin, fMax].
 * Based on rand(); call srand() first for a non-repeating sequence.
 */
double fRand(double fMin, double fMax)
{
    double unit = rand() / (double)RAND_MAX; // scale into [0, 1]
    return fMin + (fMax - fMin) * unit;
}
double sigmoid(double val)
{
return (1.0 / (1.0 +exp(-val)));
}
/*
 * Derivative of the logistic sigmoid, expressed in terms of the sigmoid's
 * OUTPUT: if y = sigmoid(x), then dy/dx = y * (1 - y). The argument here is
 * therefore an activation value, not the pre-activation input.
 */
double sigmoidDerivative(double val)
{
    double complement = 1.0 - val;
    return val * complement;
}