2009-03-03, 17:08
#1
Please excuse the comments and debug printouts in the code, but I am wondering if anyone has any idea where this goes wrong? Or rather, I know where, but not why. As you can see it is a recursive function; the first pass goes fine and I manage to send the data I want. On the first MPI call in the second pass of the recursion it crashes with the error message below. Surely it must be possible to pass communicators and groups as parameters?
Regards
Code:
[p-bc2903.hpc2n.umu.se:4063] *** An error occurred in MPI_Comm_group
[p-bc2903.hpc2n.umu.se:4063] *** on communicator MPI_COMM_WORLD
[p-bc2903.hpc2n.umu.se:4063] *** MPI_ERR_COMM: invalid communicator
[p-bc2903.hpc2n.umu.se:4063] *** MPI_ERRORS_ARE_FATAL (your MPI job will now abort)
--------------------------------------------------------------------------
mpiexec has exited due to process rank 0 with PID 4063 on node
p-bc2903.hpc2n.umu.se exiting without calling "finalize". This may
have caused other processes in the application to be terminated by
signals sent by mpiexec (as reported here).
--------------------------------------------------------------------------
[p-bc2903.hpc2n.umu.se:04062] 2 more processes have sent help message help-mpi-errors.txt / mpi_errors_are_fatal
[p-bc2903.hpc2n.umu.se:04062] Set MCA parameter "orte_base_help_aggregate" to 0 to see all help / error messages
Code:
void globalSort(int *array, int size, int nrOfProcessors, MPI_Comm comm, MPI_Group orig_group)
{
    int pivot;
    int rank, newRank;
    int newSize;
    int *ranks1;
    int *ranks2;
    int partner;
    int *concatenate;
    int group;
    int i;
    int err;

    if (nrOfProcessors == 1) {
        printf("I am a single processor and return\n");
        return;
    }

    MPI_Comm_group(comm, &orig_group);
    MPI_Status status;
    MPI_Group new_group;
    MPI_Comm new_comm;

    printf("got here 1\n");
    fflush(stdout);
    err = MPI_Comm_size(comm, &nrOfProcessors);
    printf("number of processors %d !!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n ", nrOfProcessors);
    printf("got here 2\n");
    fflush(stdout);
    err = MPI_Comm_rank(comm, &rank);
    // printf("rank is %d \n\n", err);
    MPI_Comm_group(comm, &orig_group);

    group   = getGroup(nrOfProcessors, rank);
    ranks1  = getRank1(nrOfProcessors);
    ranks2  = getRank2(nrOfProcessors);
    partner = getPartnerToExchangeData(rank, nrOfProcessors);

    splittedArray newArray;
    // printf("I am processor no. %d of a total of %d processors! pivot %d\n", rank, nrOfProcessors, pivot);
    printf("New pass\n");
    // for (i = 0; i < size; i++)
    //     printf("I am rank %d and after the exchange have %d\n", rank, array[i]);

    // Create the two groups.
    if (group == 2) {
        MPI_Group_incl(orig_group, nrOfProcessors/2, ranks1, &new_group);
    } else if (group == 1) {
        MPI_Group_incl(orig_group, nrOfProcessors/2, ranks2, &new_group);
    }
    // for (i = 0; i < size; i++)
    //     printf("I am rank %d and have %d\n", rank, array[i]);

    // Set up a new communicator.
    MPI_Comm_create(comm, new_group, &new_comm);
    // MPI_Group_rank(new_group, &newRank);

    pivot = array[getPivotIndex(size)];
    // The first element of the middle processor becomes the pivot.
    MPI_Bcast(&pivot, 1, MPI_INT, nrOfProcessors/2, comm);
    // printf("I am processor no. %d of a total of %d processors! pivot %d\n", rank, nrOfProcessors, pivot);

    // Split the array into two parts around the pivot.
    newArray = splitArrayInToParts(pivot, array, size);

    // The size to send and receive.
    int sizeToSend;
    if (group == 2) {
        sizeToSend = newArray.sizeOfsmallerThanPivot;
    } else if (group == 1) {
        sizeToSend = newArray.sizeOfgreaterThanPivot;
    }
    /*
    if (group == 2)
        for (i = 0; i < sizeToSend; i++)
            printf("I am rank %d and will send %d\n", rank, newArray.smallerThanPivot[i]);
    else if (group == 1)
        for (i = 0; i < sizeToSend; i++)
            printf("I am rank %d and will send %d\n", rank, newArray.greaterThanPivot[i]);
    */
    int sizeToRecivie;

    // Exchange data: arrays + newSize.
    // Send and receive the sizes first.
    if (group == 2) {
        MPI_Send(&sizeToSend, 1, MPI_INT, partner, 99, comm);
        MPI_Recv(&sizeToRecivie, 1, MPI_INT, partner, 99, comm, &status);
    } else if (group == 1) {
        MPI_Recv(&sizeToRecivie, 1, MPI_INT, partner, 99, comm, &status);
        MPI_Send(&sizeToSend, 1, MPI_INT, partner, 99, comm);
    }
    // printf("I am rank %d and will receive %d elements\n", rank, sizeToRecivie);

    // Allocate memory for the array that will be received.
    int *myRecivedArray = (int*) malloc(sizeToRecivie * sizeof(int));

    // Now exchange the arrays.
    if (group == 2) {
        MPI_Send(newArray.smallerThanPivot, sizeToSend, MPI_INT, partner, 99, comm);
        MPI_Recv(myRecivedArray, sizeToRecivie, MPI_INT, partner, 99, comm, &status);
    } else if (group == 1) {
        MPI_Recv(myRecivedArray, sizeToRecivie, MPI_INT, partner, 99, comm, &status);
        MPI_Send(newArray.greaterThanPivot, sizeToSend, MPI_INT, partner, 99, comm);
    }

    if (group == 2) {
        concatenate = concatenateTwoArrays(myRecivedArray, newArray.greaterThanPivot,
                                           sizeToRecivie, newArray.sizeOfgreaterThanPivot);
        newSize = sizeToRecivie + newArray.sizeOfgreaterThanPivot;
    } else if (group == 1) {
        concatenate = concatenateTwoArrays(newArray.smallerThanPivot, myRecivedArray,
                                           newArray.sizeOfsmallerThanPivot, sizeToRecivie);
        newSize = sizeToRecivie + newArray.sizeOfsmallerThanPivot;
    }
    // Free all allocated memory here.
    // printf("Now doing a recursive call, nrOfProcessors/2 = %d, newSize = %d\n", nrOfProcessors/2, newSize);
    return globalSort(concatenate, newSize, nrOfProcessors/2, new_comm, new_group);
}
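To show more concretely what I mean by passing the communicator and group down the recursion, here is a stripped-down sketch of just the splitting step. The sorting and data exchange are left out, the function and variable names are made up for the example, and the MPI_COMM_NULL guard is only my guess at what might be needed:

Code:
/* Minimal sketch: recursively split a communicator in half, assuming the
 * number of processes is a power of two.  Helper names are placeholders,
 * not taken from my real code. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

static void recursiveSplit(MPI_Comm comm)
{
    int rank, size;
    MPI_Comm_size(comm, &size);
    MPI_Comm_rank(comm, &rank);

    if (size == 1) {
        printf("single process left, stopping recursion\n");
        return;
    }

    /* Build the list of ranks for my half of the current communicator. */
    int half = size / 2;
    int inLowerHalf = (rank < half);
    int *ranks = malloc(half * sizeof(int));
    for (int i = 0; i < half; i++)
        ranks[i] = inLowerHalf ? i : half + i;

    MPI_Group orig_group, new_group;
    MPI_Comm new_comm;

    MPI_Comm_group(comm, &orig_group);
    MPI_Group_incl(orig_group, half, ranks, &new_group);

    /* Collective over the old communicator; each half passes its own
     * (disjoint) subgroup, which I assume the MPI implementation accepts. */
    MPI_Comm_create(comm, new_group, &new_comm);

    free(ranks);
    MPI_Group_free(&new_group);
    MPI_Group_free(&orig_group);

    /* A process that is not in the group it passed gets MPI_COMM_NULL here,
     * and recursing on that would make the next MPI_Comm_group call fail
     * with MPI_ERR_COMM -- this guard is my assumption, it is not in the
     * original code above. */
    if (new_comm == MPI_COMM_NULL)
        return;

    recursiveSplit(new_comm);
    MPI_Comm_free(&new_comm);
}

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    recursiveSplit(MPI_COMM_WORLD);
    MPI_Finalize();
    return 0;
}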