I got this error message when using MPI:

Code:
[p-bc2903.hpc2n.umu.se:4063] *** An error occurred in MPI_Comm_group
[p-bc2903.hpc2n.umu.se:4063] *** on communicator MPI_COMM_WORLD
[p-bc2903.hpc2n.umu.se:4063] *** MPI_ERR_COMM: invalid communicator
[p-bc2903.hpc2n.umu.se:4063] *** MPI_ERRORS_ARE_FATAL (your MPI job will now abort)
--------------------------------------------------------------------------
mpiexec has exited due to process rank 0 with PID 4063 on
node p-bc2903.hpc2n.umu.se exiting without calling "finalize". This may
have caused other processes in the application to be
terminated by signals sent by mpiexec (as reported here).
--------------------------------------------------------------------------
[p-bc2903.hpc2n.umu.se:04062] 2 more processes have sent help message help-mpi-errors.txt / mpi_errors_are_fatal
[p-bc2903.hpc2n.umu.se:04062] Set MCA parameter "orte_base_help_aggregate" to 0 to see all help / error messages
I guess it is my parameters, but I don't understand why. The function works for one round, but as soon as it calls itself recursively it crashes on the first MPI call. Any ideas? (For reference, a stripped-down sketch of just the communicator splitting is at the end of the post.)

Regards.

Code:
/**
 * This is an implementation of Quicksort. The sorting is for arrays of 
 * integers.
 * Course: Parallel Computer Systems, Spring 09
 * @author Tarek Barbiche, [email protected]
 */
#include <mpi.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define TRUE	1
#define FALSE	0


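/**
 * Recursively sorts data across processors: each round the communicator is
 * split in half, each process exchanges elements around a pivot with a
 * partner in the other half, and the function recurses on the half-sized
 * communicator until only one process is left.
 */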
void globalSort(int *array, int size, int nrOfProcessors, MPI_Comm comm, MPI_Group orig_group){

	int pivot;
	int rank, newRank;
	int newSize;
	int *ranks1;
	int *ranks2;
	int partner;                                                              
	int *concatenate;
	int group;
	int i;
	int err;

	if(nrOfProcessors == 1) {	
		printf("jag är en processor och returnerar\n");     
		return;
	}

	MPI_Status status;
	MPI_Group new_group; 
	MPI_Comm new_comm; 
	printf("kommer hit   1\n");
	fflush(stdout); 
	err = MPI_Comm_size(comm, &nrOfProcessors);   
	printf ("number of processors %d !!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n ",nrOfProcessors);
	printf("kommer hit   2\n");
	fflush(stdout);
	err =  MPI_Comm_rank(comm, &rank); 
	// printf("rank är %d \n\n",err);
	MPI_Comm_group(comm, &orig_group); 
	group = getGroup (nrOfProcessors, rank); 
	ranks1 = getRank1(nrOfProcessors);
	ranks2 = getRank2(nrOfProcessors); 
	partner = getPartnerToExchangeData(rank, nrOfProcessors);         

	splittedArray newArray;
	//	printf("I am processor no. %d of a total of %d processors! pivot %d\n", rank, nrOfProcessors, pivot);
	printf("Nytt varv \n");
	//   for (i = 0; i < size ;i++)
	//         printf("Jag är Rank %d och efter bytet %d\n", rank, array[i]); 



	// Create the two groups.   
	if (group == 2) { 
		MPI_Group_incl(orig_group, nrOfProcessors/2, ranks1, &new_group);
	} else if (group == 1)  { 
		MPI_Group_incl(orig_group, nrOfProcessors/2, ranks2, &new_group); 
	} 
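	// NOTE: the ranks passed to MPI_Group_incl are interpreted relative
	// to orig_group, not directly as MPI_COMM_WORLD ranks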

	//   for (i = 0; i < size; i++)
	//       printf("I am rank %d and have %d\n", rank, array[i]); 

	// Set up a new communicator
	MPI_Comm_create(comm, new_group, &new_comm); 
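	// MPI_Comm_create returns MPI_COMM_NULL in new_comm on any process
	// whose rank is not contained in new_group; passing MPI_COMM_NULL to
	// a later MPI call fails with MPI_ERR_COMM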


	//MPI_Group_rank (new_group, &newRank); 
	pivot = array[getPivotIndex(size)]; 
	MPI_Bcast(&pivot, 1, MPI_INT, nrOfProcessors/2, comm);
   
	//   printf("I am processor no. %d of a total of %d processors! pivot %d\n", rank, nrOfProcessors, pivot);

	// split the array into two parts around the pivot
	newArray = splitArrayInToParts(pivot, array, size);

	// the sizes to be sent and received
	int sizeToSend; 
	if (group == 2) {
		sizeToSend = newArray.sizeOfsmallerThanPivot;  
	} else if (group == 1){   
		sizeToSend = newArray.sizeOfgreaterThanPivot; 
	}
	/* if (group == 2)
	   for (i = 0; i < sizeToSend; i++)
	       printf("I am rank %d and will send %d\n", rank, newArray.smallerThanPivot[i]); 
	   else if (group == 1)
	   for (i = 0; i < sizeToSend; i++)
	       printf("I am rank %d and will send %d\n", rank, newArray.greaterThanPivot[i]); 
	 */
	int sizeToRecivie;
	// exchange data: the arrays plus the new size
	// send and receive the sizes first
	if (group == 2) {
		MPI_Send(&sizeToSend, 1 , MPI_INT, partner , 99, comm);
		MPI_Recv(&sizeToRecivie, 1, MPI_INT, partner, 99, comm, &status);     
	} else if (group == 1)  {
		MPI_Recv(&sizeToRecivie, 1, MPI_INT, partner, 99, comm, &status);
		MPI_Send(&sizeToSend, 1 , MPI_INT, partner , 99, comm);      
	}
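	// group 2 sends first and group 1 receives first, so the pair of
	// blocking calls cannot deadlock against each other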
	//printf("I am rank %d and will receive %d elements\n", rank, sizeToRecivie);


	// allocate memory for the array to be received
	int *myRecivedArray = (int*) malloc(sizeToRecivie * sizeof(int));
	// now exchange the arrays
	if (group == 2) {
		MPI_Send(newArray.smallerThanPivot, sizeToSend , MPI_INT, partner , 99, comm);
		MPI_Recv(myRecivedArray , sizeToRecivie, MPI_INT, partner, 99, comm, &status);     
	} else if (group == 1)  {
		MPI_Recv(myRecivedArray , sizeToRecivie, MPI_INT, partner, 99, comm, &status);
		MPI_Send(newArray.greaterThanPivot, sizeToSend , MPI_INT, partner , 99, comm);      
	}         

	if (group == 2) {
		concatenate = concatenateTwoArrays(myRecivedArray, newArray.greaterThanPivot, sizeToRecivie, newArray.sizeOfgreaterThanPivot );   
		newSize = sizeToRecivie + newArray.sizeOfgreaterThanPivot;
	} else if (group == 1){   
		concatenate = concatenateTwoArrays(newArray.smallerThanPivot, myRecivedArray, newArray.sizeOfsmallerThanPivot, sizeToRecivie );
		newSize = sizeToRecivie + newArray.sizeOfsmallerThanPivot;  
	}

	//printf("Nu skall jag göra ett rekursivt  nrOfProcessors/2--%d\n, newSize--%d\n", nrOfProcessors/2,newSize);
	return globalSort(concatenate, newSize, nrOfProcessors/2, new_comm, new_group); 
}




int main(int argc, char *argv[]) {
	MPI_Group group;  
	int rank;
	int nrOfProcessors;// = 2; 
	MPI_Init(&argc,&argv); 
	int size = 10;
	int max = 10;
	MPI_Comm comm;
	int *array = createArray(size, max); 

	MPI_Comm_size(MPI_COMM_WORLD, &nrOfProcessors);
	printf("Vi är %d processorer innan vi börjar \n\n", nrOfProcessors);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank); 
	MPI_Comm_group(MPI_COMM_WORLD, &group);
	MPI_Comm_create(MPI_COMM_WORLD, group, &comm);

	chunk c = getPartOfArray(rank, nrOfProcessors, array, size); 
	c.array = quicksort(c.array, c.size);
	globalSort(c.array, c.size, nrOfProcessors, comm, group);  
	MPI_Finalize();

	return 0;
}
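
For reference, here is a stripped-down sketch of just the recursive communicator splitting, with the sorting and my helper functions removed. The rank bookkeeping below is my own simplification of getRank1/getRank2, and it assumes the number of processes is a power of two, so take it as a sketch rather than the real code. Even under that assumption I test new_comm against MPI_COMM_NULL before recursing, since MPI_Comm_create hands MPI_COMM_NULL to any process whose rank is not in the group it passed:

Code:
/* Minimal sketch of the recursive split only -- no sorting, no data
 * exchange. Assumes the number of processes is a power of two. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

void splitRecursively(MPI_Comm comm) {
	int rank, nprocs, half, i;
	int *ranks;
	MPI_Group orig_group, new_group;
	MPI_Comm new_comm;

	/* guard: a process that did not end up in the previous new_group
	 * gets MPI_COMM_NULL and must not make calls on it */
	if (comm == MPI_COMM_NULL)
		return;

	MPI_Comm_size(comm, &nprocs);
	MPI_Comm_rank(comm, &rank);
	if (nprocs == 1)
		return;

	half = nprocs / 2;
	ranks = (int*) malloc(half * sizeof(int));

	/* lower half keeps ranks 0..half-1, upper half keeps half..nprocs-1;
	 * these ranks are relative to orig_group */
	MPI_Comm_group(comm, &orig_group);
	for (i = 0; i < half; i++)
		ranks[i] = (rank < half) ? i : half + i;
	MPI_Group_incl(orig_group, half, ranks, &new_group);

	/* collective over comm; every process gets the communicator of the
	 * group it passed, or MPI_COMM_NULL if it is not in that group */
	MPI_Comm_create(comm, new_group, &new_comm);

	free(ranks);
	MPI_Group_free(&new_group);
	MPI_Group_free(&orig_group);

	printf("rank %d: split %d -> %d\n", rank, nprocs, half);
	splitRecursively(new_comm);

	if (new_comm != MPI_COMM_NULL)
		MPI_Comm_free(&new_comm);
}

int main(int argc, char *argv[]) {
	MPI_Init(&argc, &argv);
	splitRecursively(MPI_COMM_WORLD);
	MPI_Finalize();
	return 0;
}
If this pattern is right, then my real code probably needs the same MPI_COMM_NULL check, or one of my helpers is putting a rank into the wrong group.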