Changeset 14875
- Timestamp: 05/03/13 15:56:49 (12 years ago)
- Location: issm/trunk-jpl/src/c
- Files: 5 edited
issm/trunk-jpl/src/c/classes/objects/Bucket.h
(r14868 → r14875)

Removed the helpers that scattered a bucket's values into a rank-local dense matrix or vector (used only by the Assemble2 path removed in this changeset):

-   void SetLocalMatrixValues(double* local_matrix,int lower_row,int global_N){ /*{{{*/
-      int i,j;
-      for(i=0;i<m;i++){
-         for(j=0;j<n;j++){
-            *(local_matrix+global_N*(idxm[i]-lower_row)+idxn[j])=*(values+n*i+j);
-         }
-      }
-   };
-   /*}}}*/
-   void SetLocalVectorValues(double* local_vector,int lower_row){ /*{{{*/
-      int i;
-      for(i=0;i<m;i++){
-         local_vector[idxm[i]-lower_row]=values[i];
-      }
-   };
-   /*}}}*/

In Marshall(), parenthesized the buffer increments (cosmetic, no behavioral change):

    /*increment buffer for next Bucket who will marshall his data: */
-   row_indices_forcpu+= m*n;
-   col_indices_forcpu+= m*n;
-   values_forcpu+= m*n;
-   modes_forcpu+= m*n;
+   row_indices_forcpu+=(m*n);
+   col_indices_forcpu+=(m*n);
+   values_forcpu+=(m*n);
+   modes_forcpu+=(m*n);

Removed an unused local:

    /*intermediary: */
    int i;
-   int j;

Removed the MPI point-to-point communication methods (also used only by the Assemble2 path):

-   #ifdef _HAVE_MPI_
-   void Isend(int receiver_rank,MPI_Request* requests,int* pcount,MPI_Comm comm){ /*{{{*/
-      int count=0;
-      int int_mode;
-
-      /*Recover pointer: */
-      count=*pcount;
-
-      /*Send all the information required: */
-      MPI_Isend(&type,1,MPI_INT,receiver_rank,2,comm,requests+count); count++;
-      MPI_Isend(&m,1,MPI_INT,receiver_rank,3,comm,requests+count); count++;
-      if(m){ MPI_Isend(idxm,m,MPI_INT,receiver_rank,4,comm,requests+count); count++; }
-      if(type==MATRIX_BUCKET){
-         MPI_Isend(&n,1,MPI_INT,receiver_rank,5,comm,requests+count); count++;
-         if(n){ MPI_Isend(idxn,n,MPI_INT,receiver_rank,6,comm,requests+count); count++; }
-         if(m*n){ MPI_Isend(values,m*n,MPI_DOUBLE,receiver_rank,7,comm,requests+count); count++; }
-      }
-      else{
-         if(m){ MPI_Isend(values,m,MPI_DOUBLE,receiver_rank,7,comm,requests+count); count++; }
-      }
-      int_mode=(int)mode;
-      MPI_Isend(&int_mode,1,MPI_INT,receiver_rank,8,comm,requests+count); count++;
-
-      /*Assign output pointer: */
-      *pcount=count;
-
-   } /*}}}*/
-   void Recv(int sender_rank, MPI_Comm comm){ /*{{{*/
-
-      MPI_Status status;
-      int int_mode;
-
-      MPI_Recv(&type,1, MPI_INT,sender_rank,2, comm, &status);
-      MPI_Recv(&m,1, MPI_INT,sender_rank,3, comm, &status);
-      if(m){
-         idxm=new int[m];
-         MPI_Recv(idxm,m, MPI_INT,sender_rank,4, comm, &status);
-      }
-      if(type==MATRIX_BUCKET){
-         MPI_Recv(&n,1, MPI_INT,sender_rank,5, comm, &status);
-         if(n){
-            idxn=new int[n];
-            MPI_Recv(idxn,n, MPI_INT,sender_rank,6, comm, &status);
-         }
-         if(m*n){
-            values=new doubletype[m*n];
-            MPI_Recv(values,m*n, MPI_DOUBLE,sender_rank,7, comm, &status);
-         }
-      }
-      else{
-         if(m){
-            values=new doubletype[m];
-            MPI_Recv(values,m, MPI_DOUBLE,sender_rank,7, comm, &status);
-         }
-      }
-      MPI_Recv(&int_mode,1, MPI_INT,sender_rank,8, comm, &status);
-      mode=(InsMode)int_mode;
-
-   } /*}}}*/
-   #endif
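For context, here is a minimal standalone sketch of the global-to-local row translation that the removed SetLocalMatrixValues performed: a bucket holds global row indices idxm, global column indices idxn and an m-by-n block of values, and the owning rank writes them into its local dense slab whose first global row is lower_row. The function and variable names below are illustrative, not part of the ISSM API.

   #include <cstdio>

   // Sketch only: scatter an m-by-n bucket of values into a rank-local dense
   // slab of width global_N whose first global row is lower_row. Column
   // indices are already global; only the rows need the lower_row offset.
   static void scatter_bucket(double* local_matrix, int lower_row, int global_N,
                              const int* idxm, int m, const int* idxn, int n,
                              const double* values){
      for(int i=0;i<m;i++){
         for(int j=0;j<n;j++){
            local_matrix[global_N*(idxm[i]-lower_row)+idxn[j]] = values[n*i+j];
         }
      }
   }

   int main(){
      double slab[2*4] = {0.};           // this rank owns global rows 2 and 3 of a 4-column matrix
      int    idxm[2]   = {2,3};          // global row indices
      int    idxn[2]   = {1,3};          // global column indices
      double vals[4]   = {10.,20.,30.,40.};
      scatter_bucket(slab,2,4,idxm,2,idxn,2,vals);
      printf("%g %g\n",slab[1],slab[3]); // 10 20 (global row 2 landed in local row 0)
      return 0;
   }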
issm/trunk-jpl/src/c/toolkits/issm/IssmDenseMat.h
(r14867 → r14875)

Added a NORM_FROBENIUS case to Norm():

       return norm;
       break;
+   case NORM_FROBENIUS:
+      norm=0;
+      for(i=0;i<this->M;i++){
+         for(j=0;j<this->N;j++){
+            norm+=pow(this->matrix[N*i+j],2);
+         }
+      }
+      return sqrt(norm);
+      break;
+
    default:
       _error_("unknown norm !");
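The new case computes the Frobenius norm, i.e. the square root of the sum of the squared entries. A minimal standalone sketch of the same computation (the helper name is illustrative, not the IssmDenseMat API):

   #include <cmath>
   #include <cstdio>

   // Sketch: Frobenius norm of a row-major M-by-N dense matrix,
   // i.e. sqrt(sum_ij a_ij^2).
   static double frobenius_norm(const double* a, int M, int N){
      double norm=0.;
      for(int i=0;i<M;i++)
         for(int j=0;j<N;j++)
            norm += a[N*i+j]*a[N*i+j];
      return sqrt(norm);
   }

   int main(){
      double a[4]={1.,2.,3.,4.};              // 2x2 matrix
      printf("%g\n",frobenius_norm(a,2,2));   // sqrt(30) ~ 5.47723
      return 0;
   }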
issm/trunk-jpl/src/c/toolkits/issm/IssmMpiDenseMat.h
(r14864 → r14875)

Removed the alternative Assemble2() implementation (the bucket-exchange assembly built on Bucket::Isend/Recv and SetLocalMatrixValues); Assemble() remains:

-   /*FUNCTION Assemble2{{{*/
-   void Assemble2(){
-
-      int i;
-      int j;
-      int k;
-      int my_rank;
-      int num_procs;
-      int *RowRank = NULL;
-
-      DataSet **bucketspercpu = NULL;
-      int *bucketspercpu_sizes = NULL;
-      MPI_Request *requests = NULL;
-      MPI_Status *statuses = NULL;
-      MPI_Status status;
-      int num_requests = 0;
-      DataSet *mybuckets = NULL;
-      int lower_row;
-      int upper_row;
-      int count = 0;
-      int size;
-
-      /*some communicator info: */
-      num_procs=IssmComm::GetSize();
-      my_rank=IssmComm::GetRank();
-      MPI_Comm comm=IssmComm::GetComm();
-
-      /*First, make a vector of size M, which for each row between 0 and M-1, tells which cpu this row belongs to: */
-      RowRank=DetermineRowRankFromLocalSize(M,m,comm);
-
-      /*Now, sort out our dataset of buckets according to cpu ownership of rows: */
-      bucketspercpu=xNew<DataSet*>(num_procs);
-      bucketspercpu_sizes=xNew<int>(num_procs);
-
-      for(i=0;i<num_procs;i++){
-         DataSet* bucketsofcpu_i=new DataSet();
-         for(j=0;j<buckets->Size();j++){
-            Bucket<doubletype>* bucket=(Bucket<doubletype>*)buckets->GetObjectByOffset(j);
-            bucket->SpawnBucketsPerCpu(bucketsofcpu_i,i,RowRank);
-         }
-         bucketspercpu[i]=bucketsofcpu_i;
-         bucketspercpu_sizes[i]=bucketsofcpu_i->Size();
-      }
-
-      /*Recap, each cpu has num_procs datasets of buckets. For a certain cpu j, for a given dataset i, the buckets this
-       * dataset owns correspond to rows that are owned by cpu i, not j!:*/
-
-      /*First, figure out how many requests are going to be sent by MPI_Isend. Do this a little bit better? */
-      for(i=0;i<num_procs;i++){
-         if(i!=my_rank){
-            num_requests+=bucketspercpu[i]->Size()*MATRIXBUCKETSIZEOFREQUESTS; //accounts for all the MPI_Isend calls in each bucket
-            num_requests++; //accounts for the one MPI_Isend of the bucket count per cpu
-         }
-      }
-
-      /*Initialize arrays to track requests and statuses: */
-      requests=new MPI_Request[num_requests];
-      statuses=new MPI_Status[num_requests];
-
-      /*Now, go through all our bucketspercpu datasets, and send them to the corresponding cpus. Do not send our own buckets though!: */
-      count=0; //count requests
-      for(i=0;i<num_procs;i++){
-         if(my_rank==i){
-            for(j=0;j<num_procs;j++){
-               if(j!=i){ //only send the buckets that this cpu does not own
-
-                  /*Go through the buckets belonging to cpu j, and send them accordingly. */
-                  DataSet* buckets=bucketspercpu[j];
-                  MPI_Isend(bucketspercpu_sizes+j,1,MPI_INT,j,1,comm,requests+count); count++; //bucketspercpu_sizes is used because an asynchronous send needs a permanent buffer
-                  for(k=0;k<buckets->Size();k++){
-                     Bucket<doubletype>* bucket=(Bucket<doubletype>*)buckets->GetObjectByOffset(k);
-                     bucket->Isend(j,requests,&count,comm);
-                  }
-               }
-            }
-         }
-         else{
-
-            /*Receive buckets from cpu i, and add them to my own my_rank bucket list.
-             *First, are we receiving anything from cpu i? :*/
-            MPI_Recv(&size,1, MPI_INT,i,1, comm, &status);
-
-            /*If so, receive the extra buckets and plug them into our buckets: */
-            if(size){
-               for(j=0;j<size;j++){
-                  Bucket<doubletype>* bucket=new Bucket<doubletype>();
-                  bucket->Recv(i,comm);
-                  bucketspercpu[my_rank]->AddObject(bucket);
-               }
-            }
-         }
-      }
-      /*Wait for all requests to complete: */
-      MPI_Waitall(num_requests,requests,statuses);
-
-      /*Every cpu now has a dataset of buckets in bucketspercpu[my_rank], which holds all the values
-       *local to this cpu that should be added to the global matrix. Just do that: */
-      GetOwnershipBoundariesFromRange(&lower_row,&upper_row,m,comm);
-      mybuckets=bucketspercpu[my_rank];
-
-      for(i=0;i<mybuckets->Size();i++){
-         Bucket<doubletype>* bucket=(Bucket<doubletype>*)mybuckets->GetObjectByOffset(i);
-         bucket->SetLocalMatrixValues(this->matrix,lower_row,N);
-      }
-
-      /*Free resources:{{{*/
-      xDelete<int>(RowRank);
-      for(i=0;i<num_procs;i++){
-         DataSet* buckets=bucketspercpu[i];
-         delete buckets;
-      }
-      xDelete<DataSet*>(bucketspercpu);
-      xDelete<int>(bucketspercpu_sizes);
-      xDelete<MPI_Request>(requests);
-      /*}}}*/
-   }
-   /*}}}*/

In Norm(), fixed the NORM_INF loop to run over the locally owned rows (m) rather than the global row count (M):

    case NORM_INF:
       local_norm=0;
-      for(i=0;i<this->M;i++){
+      for(i=0;i<this->m;i++){
          absolute=0;
          for(j=0;j<this->N;j++){

Added a NORM_FROBENIUS case that reduces the local sums of squares across ranks and broadcasts the result:

       return norm;
       break;
+   case NORM_FROBENIUS:
+      local_norm=0;
+      for(i=0;i<this->m;i++){
+         for(j=0;j<this->N;j++){
+            local_norm+=pow(this->matrix[N*i+j],2);
+         }
+      }
+      MPI_Reduce(&local_norm, &norm, 1, MPI_DOUBLE, MPI_SUM, 0, IssmComm::GetComm());
+      MPI_Bcast(&norm,1,MPI_DOUBLE,0,IssmComm::GetComm());
+      return sqrt(norm);
+      break;
+
    default:
       _error_("unknown norm !");
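A minimal standalone sketch of the distributed Frobenius norm pattern used here, assuming each rank owns m rows of an N-column row-major slab (the helper name is illustrative, not the IssmMpiDenseMat API):

   #include <cmath>
   #include <cstdio>
   #include <mpi.h>

   // Sketch: sum the local sums of squares across ranks. MPI_Reduce followed
   // by MPI_Bcast (as in the changeset) gives every rank the same value and
   // is equivalent to a single MPI_Allreduce with MPI_SUM.
   static double frobenius_norm_mpi(const double* slab, int m, int N, MPI_Comm comm){
      double local=0., global=0.;
      for(int i=0;i<m;i++)
         for(int j=0;j<N;j++)
            local += slab[N*i+j]*slab[N*i+j];
      MPI_Reduce(&local,&global,1,MPI_DOUBLE,MPI_SUM,0,comm);
      MPI_Bcast(&global,1,MPI_DOUBLE,0,comm);
      return sqrt(global);
   }

   int main(int argc,char** argv){
      MPI_Init(&argc,&argv);
      int rank; MPI_Comm_rank(MPI_COMM_WORLD,&rank);
      double slab[2]={double(rank+1),0.};           // one row, two columns per rank
      double norm=frobenius_norm_mpi(slab,1,2,MPI_COMM_WORLD);
      if(rank==0) printf("%g\n",norm);
      MPI_Finalize();
      return 0;
   }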
issm/trunk-jpl/src/c/toolkits/issm/IssmMpiVec.h
(r14869 → r14875)

Removed the alternative Assemble2() implementation, the vector counterpart of the one removed from IssmMpiDenseMat.h; Assemble() remains:

-   /*FUNCTION Assemble2{{{*/
-   void Assemble2(){
-
-      int i;
-      int j;
-      int k;
-      int my_rank;
-      int num_procs;
-      int *RowRank = NULL;
-
-      DataSet **bucketspercpu = NULL;
-      int *bucketspercpu_sizes = NULL;
-      MPI_Request *requests = NULL;
-      MPI_Status *statuses = NULL;
-      MPI_Status status;
-      int num_requests = 0;
-      DataSet *mybuckets = NULL;
-      int lower_row;
-      int upper_row;
-      int count = 0;
-      int size;
-
-      /*some communicator info: */
-      num_procs=IssmComm::GetSize();
-      my_rank=IssmComm::GetRank();
-      MPI_Comm comm=IssmComm::GetComm();
-
-      /*First, make a vector of size M, which for each row between 0 and M-1, tells which cpu this row belongs to: */
-      RowRank=DetermineRowRankFromLocalSize(M,m,comm);
-
-      /*Now, sort out our dataset of buckets according to cpu ownership of rows: */
-      bucketspercpu=xNew<DataSet*>(num_procs);
-      bucketspercpu_sizes=xNew<int>(num_procs);
-
-      for(i=0;i<num_procs;i++){
-         DataSet* bucketsofcpu_i=new DataSet();
-         for(j=0;j<buckets->Size();j++){
-            Bucket<doubletype>* bucket=(Bucket<doubletype>*)buckets->GetObjectByOffset(j);
-            bucket->SpawnBucketsPerCpu(bucketsofcpu_i,i,RowRank);
-         }
-         bucketspercpu[i]=bucketsofcpu_i;
-         bucketspercpu_sizes[i]=bucketsofcpu_i->Size();
-      }
-
-      /*Recap, each cpu has num_procs datasets of buckets. For a certain cpu j, for a given dataset i, the buckets this
-       * dataset owns correspond to rows that are owned by cpu i, not j!:*/
-
-      /*First, figure out how many requests are going to be sent by MPI_Isend. Do this a little bit better? */
-      for(i=0;i<num_procs;i++){
-         if(i!=my_rank){
-            num_requests+=bucketspercpu[i]->Size()*VECTORBUCKETSIZEOFREQUESTS; //accounts for all the MPI_Isend calls in each bucket
-            num_requests++; //accounts for the one MPI_Isend of the bucket count per cpu
-         }
-      }
-
-      /*Initialize arrays to track requests and statuses: */
-      requests=new MPI_Request[num_requests];
-      statuses=new MPI_Status[num_requests];
-
-      /*Now, go through all our bucketspercpu datasets, and send them to the corresponding cpus. Do not send our own buckets though!: */
-      count=0; //count requests
-      for(i=0;i<num_procs;i++){
-         if(my_rank==i){
-            for(j=0;j<num_procs;j++){
-               if(j!=i){ //only send the buckets that this cpu does not own
-
-                  /*Go through the buckets belonging to cpu j, and send them accordingly. */
-                  DataSet* buckets=bucketspercpu[j];
-                  MPI_Isend(bucketspercpu_sizes+j,1,MPI_INT,j,1,comm,requests+count); count++; //bucketspercpu_sizes is used because an asynchronous send needs a permanent buffer
-                  for(k=0;k<buckets->Size();k++){
-                     Bucket<doubletype>* bucket=(Bucket<doubletype>*)buckets->GetObjectByOffset(k);
-                     bucket->Isend(j,requests,&count,comm);
-                  }
-               }
-            }
-         }
-         else{
-
-            /*Receive buckets from cpu i, and add them to my own my_rank bucket list.
-             *First, are we receiving anything from cpu i? :*/
-            MPI_Recv(&size,1, MPI_INT,i,1, comm, &status);
-
-            /*If so, receive the extra buckets and plug them into our buckets: */
-            if(size){
-               for(j=0;j<size;j++){
-                  Bucket<doubletype>* bucket=new Bucket<doubletype>();
-                  bucket->Recv(i,comm);
-                  bucketspercpu[my_rank]->AddObject(bucket);
-               }
-            }
-         }
-      }
-      /*Wait for all requests to complete: */
-      MPI_Waitall(num_requests,requests,statuses);
-
-      /*Every cpu now has a dataset of buckets in bucketspercpu[my_rank], which holds all the values
-       *local to this cpu that should be added to the global vector. Just do that: */
-      GetOwnershipBoundariesFromRange(&lower_row,&upper_row,m,comm);
-      mybuckets=bucketspercpu[my_rank];
-
-      for(i=0;i<mybuckets->Size();i++){
-         Bucket<doubletype>* bucket=(Bucket<doubletype>*)mybuckets->GetObjectByOffset(i);
-         bucket->SetLocalVectorValues(this->vector,lower_row);
-      }
-
-      /*Free resources:{{{*/
-      xDelete<int>(RowRank);
-      for(i=0;i<num_procs;i++){
-         DataSet* buckets=bucketspercpu[i];
-         delete buckets;
-      }
-      xDelete<DataSet*>(bucketspercpu);
-      xDelete<int>(bucketspercpu_sizes);
-      xDelete<MPI_Request>(requests);
-      /*}}}*/
-   }
-   /*}}}*/

In Norm(), added a broadcast after each reduction so that every rank, not just rank 0, returns the reduced value:

       local_norm=0; for(i=0;i<this->m;i++)local_norm=max(local_norm,this->vector[i]);
       MPI_Reduce(&local_norm, &norm, 1, MPI_DOUBLE, MPI_MAX, 0, IssmComm::GetComm());
+      MPI_Bcast(&norm,1,MPI_DOUBLE,0,IssmComm::GetComm());
       return norm;
       break;
…
       for(i=0;i<this->m;i++)local_norm+=pow(this->vector[i],2);
       MPI_Reduce(&local_norm, &norm, 1, MPI_DOUBLE, MPI_SUM, 0, IssmComm::GetComm());
+      MPI_Bcast(&norm,1,MPI_DOUBLE,0,IssmComm::GetComm());
       return sqrt(norm);
       break;
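Before this change only rank 0 held the reduced value, so the other ranks returned whatever happened to be in norm; the added MPI_Bcast makes Norm() consistent on every rank. As a sketch, the same effect for the infinity norm can be obtained with a single MPI_Allreduce, which is equivalent to the MPI_Reduce + MPI_Bcast pair (the helper name below is illustrative, not the IssmMpiVec API):

   #include <algorithm>
   #include <cmath>
   #include <cstdio>
   #include <mpi.h>

   // Sketch: infinity norm of a distributed vector; MPI_Allreduce with
   // MPI_MAX delivers the result to every rank in one call.
   static double inf_norm_mpi(const double* v, int m, MPI_Comm comm){
      double local=0., global=0.;
      for(int i=0;i<m;i++) local=std::max(local,std::fabs(v[i]));
      MPI_Allreduce(&local,&global,1,MPI_DOUBLE,MPI_MAX,comm);
      return global;
   }

   int main(int argc,char** argv){
      MPI_Init(&argc,&argv);
      int rank; MPI_Comm_rank(MPI_COMM_WORLD,&rank);
      double v[1]={double(rank)};                    // one entry per rank
      printf("rank %d: %g\n",rank,inf_norm_mpi(v,1,MPI_COMM_WORLD));
      MPI_Finalize();
      return 0;
   }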
issm/trunk-jpl/src/c/toolkits/toolkitsenums.h
(r11941 → r14875)

Added NORM_FROBENIUS to the NormMode enum:

    typedef enum {INS_VAL, ADD_VAL} InsMode;
-   typedef enum {NORM_INF,NORM_TWO} NormMode;
+   typedef enum {NORM_INF,NORM_TWO,NORM_FROBENIUS} NormMode;
    typedef enum {DENSE_SEQUENTIAL,SPARSE_SEQUENTIAL} MatrixType;
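A minimal sketch of dispatching on the extended enum, mirroring the switch structure of the Norm() methods touched above (the function below is illustrative, not part of the toolkit):

   #include <cmath>
   #include <cstdio>

   typedef enum {INS_VAL, ADD_VAL} InsMode;
   typedef enum {NORM_INF, NORM_TWO, NORM_FROBENIUS} NormMode;

   // Sketch: dispatch on NormMode for a plain array of n values, treated as a
   // vector (NORM_INF, NORM_TWO) or as the flattened entries of a matrix
   // (NORM_FROBENIUS, which is the same sum-of-squares computation).
   static double norm_of(const double* x, int n, NormMode mode){
      double r=0.;
      switch(mode){
         case NORM_INF:
            for(int i=0;i<n;i++) r=std::fmax(r,std::fabs(x[i]));
            return r;
         case NORM_TWO:
         case NORM_FROBENIUS:
            for(int i=0;i<n;i++) r+=x[i]*x[i];
            return std::sqrt(r);
      }
      return 0.;
   }

   int main(){
      double x[3]={3.,0.,4.};
      printf("%g %g\n",norm_of(x,3,NORM_INF),norm_of(x,3,NORM_TWO)); // 4 5
      return 0;
   }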