Changeset 14875 for issm/trunk-jpl/src/c/toolkits/issm/IssmMpiVec.h
- Timestamp: 05/03/13 15:56:49 (12 years ago)
- File: 1 edited
Legend:
- Unmodified (no prefix)
- Added (marked with +)
- Removed (marked with -)
issm/trunk-jpl/src/c/toolkits/issm/IssmMpiVec.h
--- r14869
+++ r14875

@@ -295,122 +295,4 @@
   }
   /*}}}*/
-  /*FUNCTION Assemble2{{{*/
-  void Assemble2(){
-
-    int i;
-    int j;
-    int k;
-    int my_rank;
-    int num_procs;
-    int *RowRank = NULL;
-
-    DataSet **bucketspercpu = NULL;
-    int *bucketspercpu_sizes = NULL;
-    MPI_Request *requests = NULL;
-    MPI_Status *statuses = NULL;
-    MPI_Status status;
-    int num_requests = 0;
-    DataSet *mybuckets = NULL;
-    int lower_row;
-    int upper_row;
-    int count = 0;
-
-    int size;
-
-    /*some communicator info: */
-    num_procs=IssmComm::GetSize();
-    my_rank=IssmComm::GetRank();
-    MPI_Comm comm=IssmComm::GetComm();
-
-    /*First, make a vector of size M, which for each row between 0 and M-1, tells which cpu this row belongs to: */
-    RowRank=DetermineRowRankFromLocalSize(M,m,comm);
-
-    /*Now, sort out our dataset of buckets according to cpu ownership of rows: */
-    bucketspercpu=xNew<DataSet*>(num_procs);
-    bucketspercpu_sizes=xNew<int>(num_procs);
-
-    for(i=0;i<num_procs;i++){
-      DataSet* bucketsofcpu_i=new DataSet();
-      for (j=0;j<buckets->Size();j++){
-        Bucket<doubletype>* bucket=(Bucket<doubletype>*)buckets->GetObjectByOffset(j);
-        bucket->SpawnBucketsPerCpu(bucketsofcpu_i,i,RowRank);
-      }
-      bucketspercpu[i]=bucketsofcpu_i;
-      bucketspercpu_sizes[i]=bucketsofcpu_i->Size();
-    }
-
-    /*Recap, each cpu has num_procs datasets of buckets. For a certain cpu j, for a given dataset i, the buckets this
-     * dataset owns correspond to rows that are owned by cpu i, not j!:*/
-
-    /*First, figure out how many requests are going to be sent by MPI_Isend. Do this a little bit better? */
-    for(i=0;i<num_procs;i++){
-      if(i!=my_rank){
-        num_requests+=bucketspercpu[i]->Size()*VECTORBUCKETSIZEOFREQUESTS; //this is to take into account all the MPI_ISend calls in each bucket.
-        num_requests++; //this is to take into account on MPI_ISend in BucketsSend.
-      }
-    }
-
-    /*Initialize array to track requests and statuses: */
-    requests=new MPI_Request[num_requests];
-    statuses=new MPI_Status[num_requests];
-
-    /*Now, go through all our bucketspercpu datasets, and send them to the corresponding cpus. Do not send our own buckets though!: */
-    count=0; //count requests
-    for(i=0;i<num_procs;i++){
-      if(my_rank==i){
-        for(j=0;j<num_procs;j++){
-          if(j!=i){//only send the buckets that this cpu does not own.
-
-            /*Go through the buckets belonging to cpu j, and send them accordingly. */
-            DataSet* buckets=bucketspercpu[j];
-            MPI_Isend(bucketspercpu_sizes+j,1,MPI_INT,j,1,comm,requests+count); count++; //we use bucketspercpu_sizes because we need a permanent buffer for an asynchronous send
-            for(k=0;k<buckets->Size();k++){
-              Bucket<doubletype>* bucket=(Bucket<doubletype>*)buckets->GetObjectByOffset(k);
-              bucket->Isend(j,requests,&count,comm);
-            }
-          }
-        }
-      }
-      else{
-
-        /*Receive buckets from cpu i, and add them to my own my_rank bucket list: */
-        /*First, are we receiving anything from sender_rank? :*/
-        MPI_Recv(&size,1, MPI_INT,i,1, comm, &status);
-
-        /*If so, started receiving extra buckets and plug them into out buckets: */
-        if(size){
-          for(j=0;j<size;j++){
-            Bucket<doubletype>* bucket=new Bucket<doubletype>();
-            bucket->Recv(i,comm);
-            bucketspercpu[my_rank]->AddObject(bucket);
-          }
-        }
-      }
-    }
-    /*Wait for all requests to complete: */
-    MPI_Waitall(num_requests,requests,statuses);
-
-    /*Every cpu now has a dataset of buckets in bucketspercpu[my_rank], which holds all the values
-     *local to this cpu that should be added to the global matrix. Just do that: */
-    GetOwnershipBoundariesFromRange(&lower_row,&upper_row,m,comm);
-    mybuckets=bucketspercpu[my_rank];
-
-    for(i=0;i<mybuckets->Size();i++){
-      Bucket<doubletype>* bucket=(Bucket<doubletype>*)mybuckets->GetObjectByOffset(i);
-      bucket->SetLocalVectorValues(this->vector,lower_row);
-    }
-
-    /*Free ressources:{{{*/
-    xDelete<int>(RowRank);
-    for(i=0;i<num_procs;i++){
-      DataSet* buckets=bucketspercpu[i];
-      delete buckets;
-    }
-    xDelete<DataSet*>(bucketspercpu);
-    xDelete<int>(bucketspercpu_sizes);
-    xDelete<MPI_Request>(requests);
-    /*}}}*/
-  }
-  /*}}}*/
   /*FUNCTION SetValues{{{*/
   void SetValues(int ssize, int* list, doubletype* values, InsMode mode){

@@ -571,4 +453,5 @@
         local_norm=0; for(i=0;i<this->m;i++)local_norm=max(local_norm,this->vector[i]);
         MPI_Reduce(&local_norm, &norm, 1, MPI_DOUBLE, MPI_MAX, 0, IssmComm::GetComm());
+        MPI_Bcast(&norm,1,MPI_DOUBLE,0,IssmComm::GetComm());
         return norm;
         break;

@@ -577,4 +460,5 @@
         for(i=0;i<this->m;i++)local_norm+=pow(this->vector[i],2);
         MPI_Reduce(&local_norm, &norm, 1, MPI_DOUBLE, MPI_SUM, 0, IssmComm::GetComm());
+        MPI_Bcast(&norm,1,MPI_DOUBLE,0,IssmComm::GetComm());
         return sqrt(norm);
         break;
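Background on the removed Assemble2 (r14869, lines 297-414): it redistributed locally buffered values ("buckets") to the ranks that own the corresponding rows, posting one non-blocking MPI_Isend for the bucket count plus several per bucket, receiving foreign buckets with blocking MPI_Recv, and finishing with MPI_Waitall. The in-code comment about bucketspercpu_sizes points at the key constraint: a buffer handed to MPI_Isend must stay valid until the matching wait completes. The sketch below illustrates only that exchange pattern with plain int payloads; it is not ISSM code, and every name in it is made up for the example.

  #include <mpi.h>
  #include <vector>
  #include <cstdio>

  int main(int argc,char** argv){

    MPI_Init(&argc,&argv);

    int my_rank,num_procs;
    MPI_Comm_rank(MPI_COMM_WORLD,&my_rank);
    MPI_Comm_size(MPI_COMM_WORLD,&num_procs);

    /*One "bucket count" destined for every other rank. The vector outlives the
     *MPI_Isend calls on purpose: a non-blocking send needs a buffer that stays
     *valid until MPI_Waitall returns (the role bucketspercpu_sizes played). */
    std::vector<int>         counts_to_send(num_procs,my_rank+1);
    std::vector<MPI_Request> requests;

    for(int j=0;j<num_procs;j++){
      if(j==my_rank) continue;
      MPI_Request request;
      MPI_Isend(&counts_to_send[j],1,MPI_INT,j,1,MPI_COMM_WORLD,&request);
      requests.push_back(request);
    }

    /*Blocking receives of every other rank's count: */
    for(int i=0;i<num_procs;i++){
      if(i==my_rank) continue;
      int count;
      MPI_Recv(&count,1,MPI_INT,i,1,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
      std::printf("rank %d received count %d from rank %d\n",my_rank,count,i);
    }

    /*Only after this is it safe to reuse or free the send buffer: */
    MPI_Waitall((int)requests.size(),requests.data(),MPI_STATUSES_IGNORE);

    MPI_Finalize();
    return 0;
  }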
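The two added MPI_Bcast calls address the Norm computation: MPI_Reduce delivers the reduced value only on the root rank (rank 0 here), so without the broadcast every other rank would return an uninitialized norm. Below is a minimal standalone sketch of the same Reduce-then-Bcast pattern, assuming a toy local array rather than this->vector; the names are illustrative, not taken from IssmMpiVec.h.

  #include <mpi.h>
  #include <cmath>
  #include <cstdio>

  int main(int argc,char** argv){

    MPI_Init(&argc,&argv);

    int my_rank;
    MPI_Comm_rank(MPI_COMM_WORLD,&my_rank);

    /*Toy local segment of a distributed vector (the rank id, repeated): */
    double local[3]={double(my_rank),double(my_rank),double(my_rank)};

    /*Local contribution to the squared 2-norm: */
    double local_norm=0.;
    for(int i=0;i<3;i++) local_norm+=local[i]*local[i];

    /*MPI_Reduce leaves the result on rank 0 only; the broadcast is what makes
     *'norm' valid on every rank, which is exactly what this changeset adds: */
    double norm=0.;
    MPI_Reduce(&local_norm,&norm,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
    MPI_Bcast(&norm,1,MPI_DOUBLE,0,MPI_COMM_WORLD);

    std::printf("rank %d: two-norm = %g\n",my_rank,std::sqrt(norm));

    MPI_Finalize();
    return 0;
  }

A single MPI_Allreduce would give the same result in one call; the Reduce-plus-Bcast form used in the changeset keeps the change local to the existing MPI_Reduce lines.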
Note: See TracChangeset for help on using the changeset viewer.