MPI help

This is a discussion on MPI help within the C Programming forums, part of the General Programming Boards category; Code: #include <stdio.h> #include <mpi.h> #include <stdlib.h> // Codeblock v8 compatibility for time calculation #define NRA 3 // Number of ...

  1. #1
    Registered User
    Join Date
    Apr 2006
    Posts
    13

    Question MPI help

    Code:
    #include <stdio.h>
    #include <mpi.h>
    #include <stdlib.h>             // rand() / EXIT_FAILURE
    #define NRA 3                   // Number of rows in matrix A
    #define NCA 3                   // Number of columns in matrix A (== rows of B)
    #define NCB 3                   // Number of columns in matrix B
    #define MASTER 0                // Master process id
    #define FROM_MASTER 1           // Message tag: master -> slave
    #define FROM_SLAVE  2           // Message tag: slave -> master
    
    /*
     * Parallel matrix multiplication C = A * B with a master/slave layout.
     *
     * The master (rank 0) fills A and B with pseudo-random digits 0-9,
     * partitions the rows of A into near-equal bands (one band per slave),
     * sends each slave its band plus a full copy of B, then gathers the
     * partial products back into C and prints it. Each slave multiplies its
     * band and sends it back together with the row offset, so the master
     * knows where the band belongs inside C.
     */
    int main(int argc, char *argv[])
    {
        int process,                // Number of tasks in partition
            myrank,                 // Process ID
            slave_process,          // Number of slave tasks
            source,                 // Process id of message source
            dest,                   // Process id of message destination
            mtype,                  // Message tag
            rows,                   // Rows of matrix A sent to each worker
            averow, extra, offset,  // Row-banding bookkeeping
            i, j, k;                // Loop counters
        double starttime,           // Beginning of execution time
               endtime,             // End of execution time
               elapsed;             // Total elapsed execution time
    
        int a[NRA][NCA],            // Matrix A to be multiplied
            b[NCA][NCB],            // Matrix B to be multiplied
            c[NRA][NCB];            // Result matrix C
    
        MPI_Status status;
    
        // MPI environment initiation
        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &process);
        MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    
        // BUG FIX: with one process there are no slaves and the original code
        // divided by zero (NRA / slave_process). Require at least two ranks.
        if (process < 2)
        {
            if (myrank == MASTER)
                fprintf(stderr, "Need at least 2 MPI processes (1 master + 1 slave)\n");
            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
        }
    
        printf("Process ID = %d\n", myrank);    // Display all processes available at runtime
        slave_process = process - 1;            // Total number of slave processes
    
        /***************************** Master Process Computation *********************************/
    
        if (myrank == MASTER)
        {
           starttime = MPI_Wtime();             // Begin of execution time
    
           // Fill A with pseudo-random digits. rand() is deliberately left
           // unseeded so every run produces the same matrices (reproducible).
           for (i = 0; i < NRA; i++)
              for (j = 0; j < NCA; j++)
                 a[i][j] = rand() % 10;
    
           // Display Matrix A
           printf("\n   Matrix A \n");
           printf("--------------\n");
           for (i = 0; i < NRA; ++i)
           {
              for (j = 0; j < NCA; ++j)
                 printf("%3d", a[i][j]);
              printf("\n");
           }
    
           for (i = 0; i < NCA; i++)
              for (j = 0; j < NCB; j++)
                 b[i][j] = rand() % 10;
    
           // Display Matrix B
           // BUG FIX: the original iterated NRA rows here; B has NCA rows
           // (harmless only because NRA == NCA in this configuration).
           printf("\n   Matrix B \n");
           printf("--------------\n");
           for (i = 0; i < NCA; ++i)
           {
              for (j = 0; j < NCB; ++j)
                 printf("%3d", b[i][j]);
              printf("\n");
           }
    
           printf("\n\n");
           printf("\n\n  Number of slave process = %d\n", slave_process);
    
           // Partition the NRA rows of A into bands, one per slave. When NRA
           // is not evenly divisible, the first `extra` slaves get one more row.
           averow = NRA / slave_process;
           extra  = NRA % slave_process;
           offset = 0;
           mtype  = FROM_MASTER;
           for (dest = 1; dest <= slave_process; dest++)
           {
              rows = (dest <= extra) ? averow + 1 : averow;
    
              printf("\n  Sending %d rows to process %d\n", rows, dest);
    
              MPI_Send(&offset, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);              // Where this band starts in A/C
              MPI_Send(&rows, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);                // How many rows in the band
              MPI_Send(&a[offset][0], rows * NCA, MPI_INT, dest, mtype, MPI_COMM_WORLD); // The band of A itself
              MPI_Send(&b, NCA * NCB, MPI_INT, dest, mtype, MPI_COMM_WORLD);           // Full copy of B
    
              offset = offset + rows;  // Next band starts where this one ended
           }
    
           // Collect each slave's band of C; the echoed-back offset tells the
           // master where in C to place the band.
           mtype = FROM_SLAVE;
           for (i = 1; i <= slave_process; i++)
           {
              source = i;
              MPI_Recv(&offset, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
              MPI_Recv(&rows, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
              MPI_Recv(&c[offset][0], rows * NCB, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
           }
    
           // Display result of computation : Matrix C
           printf("\nResult : Matrix C \n");
           printf("-----------------\n");
           for (i = 0; i < NRA; i++)
           {
              printf("\n");
              for (j = 0; j < NCB; j++)
                 printf("%3d   ", c[i][j]);
           }
           printf("\n");
        }
    
        /**************************** Slave Process Computation ***********************************/
    
        if (myrank != MASTER)
        {
           mtype = FROM_MASTER;
           MPI_Recv(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
           printf("\noffset =%d\n", offset);
           MPI_Recv(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
           // The band is stored starting at a[0][0] on the slave; `offset` is
           // only echoed back so the master knows where the result goes in C.
           MPI_Recv(&a, rows * NCA, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
           MPI_Recv(&b, NCA * NCB, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
    
           // Multiply the received band: c[0..rows-1][*] = a_band * b
           for (k = 0; k < NCB; k++)
              for (i = 0; i < rows; i++)
              {
                 c[i][k] = 0;
                 for (j = 0; j < NCA; j++)
                    c[i][k] = c[i][k] + a[i][j] * b[j][k];
              }
    
           mtype = FROM_SLAVE;
           MPI_Send(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
           MPI_Send(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
           MPI_Send(&c, rows * NCB, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
        }
    
        if (myrank == MASTER)
        {
           endtime = MPI_Wtime();
           elapsed = endtime - starttime;
           printf("\n\nParallel Time %f seconds\n", elapsed);  // typo fix: was "Parellel"
        }
    
        MPI_Finalize();  // Termination of MPI environment
        return 0;
    }
    in the code above what is role of "offset" both in the master and slave process ?

  2. #2
    Deathray Engineer MacGyver's Avatar
    Join Date
    Mar 2007
    Posts
    3,211
    Probably to be an offset.

  3. #3
    Registered User Codeplug's Avatar
    Join Date
    Mar 2003
    Posts
    4,669
    >> ... what is role of "offset" ...
    Code:
    // Wait results from slave processes
    ...
    MPI_Recv(&c[offset][0], rows*NCB, ...
    The slaves don't need it for their calculations, but they do send it back so that the master will know where in C to put the slave's results. Passing the offset back and forth wasn't really necessary since it receives from each slave sequentially.

    gg

  4. #4
    Registered User
    Join Date
    Apr 2006
    Posts
    13
    Thanks a lot, have a great day.

Popular pages Recent additions subscribe to a feed

Similar Threads

  1. Communication using MPI
    By Cell in forum Linux Programming
    Replies: 9
    Last Post: 08-13-2009, 02:28 AM
  2. MPI in C
    By ltee in forum C Programming
    Replies: 5
    Last Post: 03-26-2009, 06:10 AM
  3. Sorting whit MPI
    By isato in forum C Programming
    Replies: 0
    Last Post: 03-03-2009, 09:38 AM
  4. Malloc and MPI
    By moddinati in forum C Programming
    Replies: 17
    Last Post: 03-07-2008, 06:55 PM
  5. MPI programming
    By kris.c in forum Tech Board
    Replies: 1
    Last Post: 12-08-2006, 11:25 AM

1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21