Changeset 14709
- Timestamp: 04/22/13 18:03:21 (12 years ago)
- Location: issm/trunk-jpl/src/c
- Files: 2 edited
issm/trunk-jpl/src/c/classes/objects/Bucket.h (r14685 → r14709)

Added includes and a constant giving the number of MPI_Isend requests it takes to transfer one bucket to another cpu:

	#include "../../Container/DataSet.h"
	#include "../../toolkits/toolkitsenums.h"

	#define BUCKETSIZEOFREQUESTS 6 /*how many MPI_Isend requests does it take to transfer the contents of a bucket to another cpu?*/

Added a default constructor:

	Bucket(){ /*{{{*/
		this->m=0;
		this->n=0;
		this->idxm=NULL;
		this->idxn=NULL;
		this->values=NULL;
		mode=INS_VAL;
	} /*}}}*/

Added bucket-specific routines to spawn per-cpu buckets, write values into a local dense matrix, and transfer a bucket over MPI:

	/*specific routines of Bucket: */
	void SpawnBucketsPerCpu(DataSet* bucketsofcpu_i,int rank_i,int* rowranks){ /*{{{*/

		int i;

		/*go through our idxm index of rows this bucket owns, and spawn buckets
		 *if these rows belong to cpu rank_i. Use rowranks to determine this.*/
		for(i=0;i<m;i++){
			if(rowranks[idxm[i]]==rank_i){
				/*This row belongs to cpu rank_i, so spawn a bucket with this row and add it to the bucketsofcpu_i dataset: */
				bucketsofcpu_i->AddObject(new Bucket(1,idxm+i,n,idxn,values+n*i,mode));
			}
		}
	} /*}}}*/
	void SetLocalMatrixValues(double* local_matrix,int lower_row,int global_N){ /*{{{*/

		int i,j;
		for(i=0;i<m;i++){
			for(j=0;j<n;j++){
				*(local_matrix+global_N*(idxm[i]-lower_row)+idxn[j])=*(values+n*i+j);
			}
		}
	} /*}}}*/
	void Isend(int receiver_rank,MPI_Request* requests,int* pcount,MPI_Comm comm){ /*{{{*/

		int count;
		int int_mode;

		/*Recover current request count: */
		count=*pcount;

		/*Send all the information required: */
		MPI_Isend(&m,1,MPI_INT,receiver_rank,2,comm,requests+count); count++;
		if(m){ MPI_Isend(idxm,m,MPI_INT,receiver_rank,3,comm,requests+count); count++; }
		MPI_Isend(&n,1,MPI_INT,receiver_rank,4,comm,requests+count); count++;
		if(n){ MPI_Isend(idxn,n,MPI_INT,receiver_rank,5,comm,requests+count); count++; }
		if(m*n){ MPI_Isend(values,m*n,MPI_DOUBLE,receiver_rank,6,comm,requests+count); count++; }
		int_mode=(int)mode;
		MPI_Isend(&int_mode,1,MPI_INT,receiver_rank,7,comm,requests+count); count++;

		/*Return updated request count: */
		*pcount=count;
	} /*}}}*/
	void Recv(int sender_rank,MPI_Comm comm){ /*{{{*/

		MPI_Status status;
		int int_mode;

		MPI_Recv(&m,1,MPI_INT,sender_rank,2,comm,&status);
		if(m){
			idxm=new int[m];
			MPI_Recv(idxm,m,MPI_INT,sender_rank,3,comm,&status);
		}
		MPI_Recv(&n,1,MPI_INT,sender_rank,4,comm,&status);
		if(n){
			idxn=new int[n];
			MPI_Recv(idxn,n,MPI_INT,sender_rank,5,comm,&status);
		}
		if(m*n){
			values=new doubletype[m*n];
			MPI_Recv(values,m*n,MPI_DOUBLE,sender_rank,6,comm,&status);
		}
		MPI_Recv(&int_mode,1,MPI_INT,sender_rank,7,comm,&status);
		mode=(InsMode)int_mode;
	} /*}}}*/
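BUCKETSIZEOFREQUESTS matches the six MPI_Isend calls issued by Bucket::Isend, one per message tag 2 through 7 (m, idxm, n, idxn, values, mode), which Bucket::Recv consumes in the same order. The standalone sketch below reproduces that tag protocol between two ranks using plain arrays instead of the Bucket class; variable names are illustrative only and not part of ISSM (run with at least two MPI ranks, e.g. mpiexec -n 2):

	#include <mpi.h>
	#include <cstdio>

	int main(int argc,char** argv){

		MPI_Init(&argc,&argv);

		int rank;
		MPI_Comm_rank(MPI_COMM_WORLD,&rank);

		if(rank==0){
			/*a 1x2 "bucket": global row 4, columns 1 and 3, sent to rank 1*/
			int    m=1,n=2,mode=0;
			int    idxm[1]={4};
			int    idxn[2]={1,3};
			double values[2]={10.5,-2.0};

			MPI_Request requests[6]; /*6 == BUCKETSIZEOFREQUESTS*/
			int count=0;
			MPI_Isend(&m,1,MPI_INT,1,2,MPI_COMM_WORLD,requests+count); count++;
			MPI_Isend(idxm,m,MPI_INT,1,3,MPI_COMM_WORLD,requests+count); count++;
			MPI_Isend(&n,1,MPI_INT,1,4,MPI_COMM_WORLD,requests+count); count++;
			MPI_Isend(idxn,n,MPI_INT,1,5,MPI_COMM_WORLD,requests+count); count++;
			MPI_Isend(values,m*n,MPI_DOUBLE,1,6,MPI_COMM_WORLD,requests+count); count++;
			MPI_Isend(&mode,1,MPI_INT,1,7,MPI_COMM_WORLD,requests+count); count++;

			/*buffers must stay valid until the nonblocking sends complete*/
			MPI_Waitall(count,requests,MPI_STATUSES_IGNORE);
		}
		else if(rank==1){
			/*receive in the same tag order used by Bucket::Recv (m and n assumed nonzero here)*/
			MPI_Status status;
			int m,n,mode;
			MPI_Recv(&m,1,MPI_INT,0,2,MPI_COMM_WORLD,&status);
			int* idxm=new int[m];
			MPI_Recv(idxm,m,MPI_INT,0,3,MPI_COMM_WORLD,&status);
			MPI_Recv(&n,1,MPI_INT,0,4,MPI_COMM_WORLD,&status);
			int* idxn=new int[n];
			MPI_Recv(idxn,n,MPI_INT,0,5,MPI_COMM_WORLD,&status);
			double* values=new double[m*n];
			MPI_Recv(values,m*n,MPI_DOUBLE,0,6,MPI_COMM_WORLD,&status);
			MPI_Recv(&mode,1,MPI_INT,0,7,MPI_COMM_WORLD,&status);

			printf("received a %dx%d bucket (mode %d), first value %g\n",m,n,mode,values[0]);

			delete [] idxm;
			delete [] idxn;
			delete [] values;
		}

		MPI_Finalize();
		return 0;
	}

Because the sends are nonblocking, every buffer handed to MPI_Isend must remain valid until the matching wait completes; that is why the Assemble routine in the second file keeps the per-cpu bucket counts in the heap-allocated bucketspercpu_sizes array rather than a loop-local temporary.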
issm/trunk-jpl/src/c/toolkits/issm/IssmMpiDenseMat.h (r14678 → r14709)

Added include:

	#include "../../toolkits/toolkits.h"

Replaced the Assemble() stub, which previously just called _error_("not supported yet!"), with a full implementation:

	/*FUNCTION Assemble{{{*/
	void Assemble(){

		int          i,j,k;
		int          my_rank;
		int          num_procs;
		int         *RowRank             = NULL;

		DataSet    **bucketspercpu       = NULL;
		int         *bucketspercpu_sizes = NULL;
		MPI_Request *requests            = NULL;
		MPI_Status  *statuses            = NULL;
		MPI_Status   status;
		int          num_requests        = 0;
		DataSet     *mybuckets           = NULL;
		int          lower_row;
		int          upper_row;
		int          count               = 0;
		int          size;

		/*some communicator info: */
		num_procs=IssmComm::GetSize();
		my_rank=IssmComm::GetRank();
		MPI_Comm comm=IssmComm::GetComm();

		/*First, make a vector of size M which, for each row between 0 and M-1, tells which cpu this row belongs to: */
		RowRank=DetermineRowRankFromLocalSize(M,m,comm);

		/*Now, sort out our dataset of buckets according to cpu ownership of rows: */
		bucketspercpu=xNew<DataSet*>(num_procs);
		bucketspercpu_sizes=xNew<int>(num_procs);

		for(i=0;i<num_procs;i++){
			DataSet* bucketsofcpu_i=new DataSet();
			for(j=0;j<buckets->Size();j++){
				Bucket<doubletype>* bucket=(Bucket<doubletype>*)buckets->GetObjectByOffset(j);
				bucket->SpawnBucketsPerCpu(bucketsofcpu_i,i,RowRank);
			}
			bucketspercpu[i]=bucketsofcpu_i;
			bucketspercpu_sizes[i]=bucketsofcpu_i->Size();
		}

		/*Recap: each cpu now holds num_procs datasets of buckets. For a given cpu j, the buckets in
		 *dataset i correspond to rows owned by cpu i, not j!*/

		/*Figure out how many requests will be sent by MPI_Isend (this count could be tightened): */
		for(i=0;i<num_procs;i++){
			if(i!=my_rank){
				num_requests+=bucketspercpu[i]->Size()*BUCKETSIZEOFREQUESTS; //the MPI_Isend calls issued by each bucket
				num_requests++;                                              //the one MPI_Isend of the bucket count for cpu i
			}
		}

		/*Initialize arrays to track requests and statuses: */
		requests=new MPI_Request[num_requests];
		statuses=new MPI_Status[num_requests];

		/*Go through all our bucketspercpu datasets and send them to the corresponding cpus. Do not send our own buckets though!: */
		count=0; //count requests
		for(i=0;i<num_procs;i++){
			if(my_rank==i){
				for(j=0;j<num_procs;j++){
					if(j!=i){ //only send the buckets that this cpu does not own

						/*Go through the buckets belonging to cpu j, and send them accordingly: */
						DataSet* buckets=bucketspercpu[j];
						MPI_Isend(bucketspercpu_sizes+j,1,MPI_INT,j,1,comm,requests+count); count++; //bucketspercpu_sizes is a permanent buffer, as required by an asynchronous send
						for(k=0;k<buckets->Size();k++){
							Bucket<doubletype>* bucket=(Bucket<doubletype>*)buckets->GetObjectByOffset(k);
							bucket->Isend(j,requests,&count,comm);
						}
					}
				}
			}
			else{

				/*Receive buckets from cpu i, and add them to our own my_rank bucket list.
				 *First, are we receiving anything from cpu i?: */
				MPI_Recv(&size,1,MPI_INT,i,1,comm,&status);

				/*If so, receive the extra buckets and plug them into our buckets: */
				if(size){
					for(j=0;j<size;j++){
						Bucket<doubletype>* bucket=new Bucket<doubletype>();
						bucket->Recv(i,comm);
						bucketspercpu[my_rank]->AddObject(bucket);
					}
				}
			}
		}
		/*Wait for all requests to complete: */
		MPI_Waitall(num_requests,requests,statuses);

		/*Every cpu now has a dataset of buckets in bucketspercpu[my_rank], which holds all the values
		 *local to this cpu that should be added to the global matrix. Just do that: */
		GetOwnershipBoundariesFromRange(&lower_row,&upper_row,m,comm);
		mybuckets=bucketspercpu[my_rank];

		for(i=0;i<mybuckets->Size();i++){
			Bucket<doubletype>* bucket=(Bucket<doubletype>*)mybuckets->GetObjectByOffset(i);
			bucket->SetLocalMatrixValues(this->matrix,lower_row,N);
		}

		/*Free resources:{{{*/
		xDelete<int>(RowRank);
		for(i=0;i<num_procs;i++){
			DataSet* buckets=bucketspercpu[i];
			delete buckets;
		}
		xDelete<DataSet*>(bucketspercpu);
		xDelete<int>(bucketspercpu_sizes);
		xDelete<MPI_Request>(requests);
		xDelete<MPI_Status>(statuses);
		/*}}}*/
	}
	/*}}}*/

The remaining hunks at the end of the file only adjust blank lines around the closing `};` of the class and the #endif //#ifndef _ISSM_MPI_DENSE_MAT_H_ guard.
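Assemble relies on two helpers that are not part of this changeset, DetermineRowRankFromLocalSize and GetOwnershipBoundariesFromRange. Their ISSM implementations are not shown here; the sketch below only illustrates the ownership logic Assemble appears to assume (each cpu owning a contiguous block of m rows, blocks ordered by rank), with hypothetical function names:

	#include <mpi.h>

	/*Hypothetical stand-ins for the helpers used by Assemble; names and behavior are
	 *assumptions, not the ISSM implementations.*/

	/*Map every global row 0..M-1 to the rank owning it, assuming each rank owns a contiguous
	 *block of local_m rows and the sum of local_m over all ranks equals M.*/
	int* RowRankFromLocalSize(int M,int local_m,MPI_Comm comm){

		int num_procs;
		MPI_Comm_size(comm,&num_procs);

		/*gather every rank's local row count*/
		int* local_sizes=new int[num_procs];
		MPI_Allgather(&local_m,1,MPI_INT,local_sizes,1,MPI_INT,comm);

		/*expand the counts into a row -> rank lookup table*/
		int* row_rank=new int[M];
		int  row=0;
		for(int rank=0;rank<num_procs;rank++){
			for(int i=0;i<local_sizes[rank];i++) row_rank[row++]=rank;
		}

		delete [] local_sizes;
		return row_rank;
	}

	/*Return the [lower_row,upper_row) range owned by this rank under the same assumption.*/
	void OwnershipBoundaries(int* lower_row,int* upper_row,int local_m,MPI_Comm comm){

		int rank;
		MPI_Comm_rank(comm,&rank);

		/*exclusive prefix sum of local sizes gives this rank's first global row*/
		int offset=0;
		MPI_Exscan(&local_m,&offset,1,MPI_INT,MPI_SUM,comm);
		if(rank==0) offset=0; /*MPI_Exscan leaves rank 0's receive buffer undefined*/

		*lower_row=offset;
		*upper_row=offset+local_m;
	}

Under that block-ownership assumption, SetLocalMatrixValues can subtract lower_row from a bucket's global row index to land in this cpu's slab of the dense matrix, which is exactly how Assemble uses the boundaries it gets back from GetOwnershipBoundariesFromRange.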