Changeset 14750
- Timestamp: 04/24/13 22:24:33
- Location: issm/trunk-jpl/src/c
- Files: 3 edited
Legend: unprefixed lines are unchanged context; lines prefixed with + were added, lines prefixed with - were removed.
issm/trunk-jpl/src/c/classes/objects/Bucket.h
(r14727 → r14750)

     #include "./Object.h"
     #include "../../shared/Alloc/alloc.h"
+    #include "../../include/macros.h"
     #include "../../Container/DataSet.h"
     #include "../../toolkits/toolkitsenums.h"
     /*}}}*/

-    #define BUCKETSIZEOFREQUESTS 6 /*how many MPI_Isend requests does it take to transfer the contents of a bucket to another cpu?*/
+    /*how many MPI_Isend requests does it take to transfer the contents of a bucket to another cpu?*/
+    #define MATRIXBUCKETSIZEOFREQUESTS 7
+    #define VECTORBUCKETSIZEOFREQUESTS 5
+    typedef enum {VECTOR_BUCKET, MATRIX_BUCKET} BucketType;
     template <class doubletype> class Bucket: public Object{

        private:
+          int type; //either a VECTOR_BUCKET or MATRIX_BUCKET
           int m,n; /*size of local matrix we are storing*/
           /*row and column indices of the matrix we are storing*/
…
           /*constructors, destructors: */
           Bucket(){ /*{{{*/
+             this->type=0;
              this->m=0;
              this->n=0;
…
           } /*}}}*/
           Bucket(int min,int* idxmin,int nin,int* idxnin,doubletype* valuesin,InsMode modein){ /*{{{*/
+             this->type=MATRIX_BUCKET;
              this->m=min;
              this->n=nin;
…
           } /*}}}*/
           Bucket(int min,int* idxmin,doubletype* valuesin,InsMode modein){ /*{{{*/
+             this->type=VECTOR_BUCKET;
              this->m=min;
              this->n=1;
…
           /*object virtual functions definitions:*/
           void Echo(){ /*{{{*/
-             printf("Bucket echo (cpu #: %i): \n",IssmComm::GetRank());
-             printf("# rows: %i, #cols: %i\n",this->m,this->n);
+             _printLine_("Bucket echo (cpu #: "<<IssmComm::GetRank()<<")");
+             _printLine_("bucket type: " << type);
+             _printLine_("num rows: "<<this->m<<" num cols: "<<this->n);
           } /*}}}*/
           void DeepEcho(){ /*{{{*/
…

              _printLine_("Bucket echo (cpu #: "<<IssmComm::GetRank()<<")");
+             _printLine_("bucket type: " << type);
              _printLine_("num rows: "<<this->m<<" num cols: "<<this->n);
-             for (i=0;i<this->m;i++){
-                _printLine_("row "<<this->idxm[i]<<", column indices: ");
-                for (j=0;j<this->n;j++){
-                   _printLine_(" "<<this->idxn[j]);
-                }
-                _printLine_("values: ");
-                for (j=0;j<this->n;j++){
-                   _printLine_(" "<<this->values[m*i+j]);
-                }
-             }
+             if(type==MATRIX_BUCKET){
+                for (i=0;i<this->m;i++){
+                   _printLine_("row "<<this->idxm[i]<<", column indices: ");
+                   for (j=0;j<this->n;j++){
+                      _printLine_(" "<<this->idxn[j]);
+                   }
+                   _printLine_("values: ");
+                   for (j=0;j<this->n;j++){
+                      _printLine_(" "<<this->values[m*i+j]);
+                   }
+                }
+             }
+             else if(type==VECTOR_BUCKET){
+                for (i=0;i<this->m;i++){
+                   _printLine_("row "<<this->idxm[i]<<", value " << this->values[i]);
+                }
+             }
+             else _error_("unknown type of bucket!");
           }
           /*}}}*/
…
                 if (rowranks[idxm[i]]==rank_i){
                    /*This row belongs to cpu rank_i, so spawn a bucket with this row, and add it to the bucketsofcpu_i dataset: */
-                   bucketsofcpu_i->AddObject(new Bucket(1,idxm+i,n,idxn,values+n*i,mode));
+                   if(type==MATRIX_BUCKET){
+                      bucketsofcpu_i->AddObject(new Bucket(1,idxm+i,n,idxn,values+n*i,mode));
+                   }
+                   else{
+                      bucketsofcpu_i->AddObject(new Bucket(1,idxm+i,values+i,mode));
+                   }
                 }
              }
…
           };
           /*}}}*/
+          void SetLocalVectorValues(doubletype* local_vector,int lower_row){ /*{{{*/
+             int i;
+             for(i=0;i<m;i++){
+                local_vector[idxm[i]-lower_row]=values[i];
+             }
+          };
+          /*}}}*/
           #ifdef _HAVE_MPI_
           void Isend(int receiver_rank,MPI_Request* requests,int* pcount,MPI_Comm comm){ /*{{{*/
…

              /*Send all the information required: */
+             MPI_Isend(&type,1,MPI_INT,receiver_rank,2,comm,requests+count); count++;
              MPI_Isend(&m,1,MPI_INT,receiver_rank,2,comm,requests+count); count++;
              if(m){ MPI_Isend(idxm,m,MPI_INT,receiver_rank,3,comm,requests+count); count++; }
-             MPI_Isend(&n,1,MPI_INT,receiver_rank,4,comm,requests+count); count++;
-             if(n){ MPI_Isend(idxn,n,MPI_INT,receiver_rank,5,comm,requests+count); count++; }
-             if(m*n){ MPI_Isend(values,m*n,MPI_DOUBLE,receiver_rank,6,comm,requests+count); count++; }
+             if(type==MATRIX_BUCKET){
+                MPI_Isend(&n,1,MPI_INT,receiver_rank,4,comm,requests+count); count++;
+                if(n){ MPI_Isend(idxn,n,MPI_INT,receiver_rank,5,comm,requests+count); count++; }
+                if(m*n){ MPI_Isend(values,m*n,MPI_DOUBLE,receiver_rank,6,comm,requests+count); count++; }
+             }
+             else{
+                if(m){ MPI_Isend(values,m,MPI_DOUBLE,receiver_rank,6,comm,requests+count); count++; }
+             }
              int_mode=(int)mode;
              MPI_Isend(&int_mode,1,MPI_INT,receiver_rank,7,comm,requests+count); count++;
…
              int int_mode;

+             MPI_Recv(&type,1, MPI_INT,sender_rank,2, comm, &status);
              MPI_Recv(&m,1, MPI_INT,sender_rank,2, comm, &status);
              if(m){
…
                 MPI_Recv(idxm,m, MPI_INT,sender_rank,3, comm, &status);
              }
-             MPI_Recv(&n,1, MPI_INT,sender_rank,4, comm, &status);
-             if(n){
-                idxn=new int[n];
-                MPI_Recv(idxn,n, MPI_INT,sender_rank,5, comm, &status);
-             }
-             if(m*n){
-                values=new doubletype[m*n];
-                MPI_Recv(values,m*n, MPI_DOUBLE,sender_rank,6, comm, &status);
-             }
+             if(type==MATRIX_BUCKET){
+                MPI_Recv(&n,1, MPI_INT,sender_rank,4, comm, &status);
+                if(n){
+                   idxn=new int[n];
+                   MPI_Recv(idxn,n, MPI_INT,sender_rank,5, comm, &status);
+                }
+                if(m*n){
+                   values=new doubletype[m*n];
+                   MPI_Recv(values,m*n, MPI_DOUBLE,sender_rank,6, comm, &status);
+                }
+             }
+             else{
+                if(m){
+                   values=new doubletype[m];
+                   MPI_Recv(values,m, MPI_DOUBLE,sender_rank,6, comm, &status);
+                }
+             }
              MPI_Recv(&int_mode,1, MPI_INT,sender_rank,7, comm, &status);
              mode=(InsMode)int_mode;
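The two new request-count constants follow directly from the Isend/Recv pair above: a vector bucket is shipped as five separate MPI_Isend calls (type, m, idxm, values, mode), a matrix bucket as seven (adding n and idxn). The following standalone sketch, which is not ISSM code, replays that five-message vector handshake with the same tag numbers as Bucket.h; everything else (variable values, the fixed ranks 0 and 1) is made up for illustration. Run with at least two MPI ranks, e.g. mpirun -n 2.

    #include <mpi.h>
    #include <cstdio>

    int main(int argc,char** argv){

       MPI_Init(&argc,&argv);
       int rank; MPI_Comm_rank(MPI_COMM_WORLD,&rank);

       if(rank==0){
          /*sender side: five MPI_Isend calls, mirroring Bucket::Isend for a vector bucket*/
          int type=0,m=3,mode=1;                 /*stand-ins for VECTOR_BUCKET and an InsMode value*/
          int idxm[3]={4,7,9};                   /*global row indices*/
          double values[3]={1.0,2.0,3.0};
          MPI_Request requests[5]; int count=0;  /*5 == VECTORBUCKETSIZEOFREQUESTS*/
          MPI_Isend(&type,1,MPI_INT,1,2,MPI_COMM_WORLD,requests+count); count++;
          MPI_Isend(&m,1,MPI_INT,1,2,MPI_COMM_WORLD,requests+count); count++;
          MPI_Isend(idxm,m,MPI_INT,1,3,MPI_COMM_WORLD,requests+count); count++;
          MPI_Isend(values,m,MPI_DOUBLE,1,6,MPI_COMM_WORLD,requests+count); count++;
          MPI_Isend(&mode,1,MPI_INT,1,7,MPI_COMM_WORLD,requests+count); count++;
          MPI_Waitall(count,requests,MPI_STATUSES_IGNORE);
       }
       else if(rank==1){
          /*receiver side: blocking MPI_Recv calls in the same order, as in Bucket::Recv;
           * the two tag-2 messages cannot overtake each other, so type arrives before m*/
          int type,m,mode; MPI_Status status;
          MPI_Recv(&type,1,MPI_INT,0,2,MPI_COMM_WORLD,&status);
          MPI_Recv(&m,1,MPI_INT,0,2,MPI_COMM_WORLD,&status);
          int*    idxm  =new int[m];
          double* values=new double[m];
          MPI_Recv(idxm,m,MPI_INT,0,3,MPI_COMM_WORLD,&status);
          MPI_Recv(values,m,MPI_DOUBLE,0,6,MPI_COMM_WORLD,&status);
          MPI_Recv(&mode,1,MPI_INT,0,7,MPI_COMM_WORLD,&status);
          printf("got %i values, first row %i\n",m,idxm[0]);
          delete [] idxm; delete [] values;
       }

       MPI_Finalize();
       return 0;
    }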
issm/trunk-jpl/src/c/toolkits/issm/IssmMpiDenseMat.h
(r14709 → r14750)

     for(i=0;i<num_procs;i++){
        if(i!=my_rank){
-          num_requests+=bucketspercpu[i]->Size()*BUCKETSIZEOFREQUESTS; //this is to take into account all the MPI_Isend calls in each bucket.
+          num_requests+=bucketspercpu[i]->Size()*MATRIXBUCKETSIZEOFREQUESTS; //this is to take into account all the MPI_Isend calls in each bucket.
           num_requests++; //this is to take into account one MPI_Isend in BucketsSend.
        }
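This pre-count matters because the requests array later handed to MPI_Waitall needs one slot per posted MPI_Isend. A hypothetical helper (RequestBudget is not in the changeset; it takes the per-cpu bucket counts as a plain int array) capturing the same arithmetic:

    /*Return how many MPI_Isend requests this rank will post: a fixed number per
     * bucket bound for a remote rank, plus one extra send per remote rank that
     * carries the bucket count itself.*/
    static int RequestBudget(const int* bucketspercpu_sizes,int num_procs,int my_rank,int requests_per_bucket){
       int num_requests=0;
       for(int i=0;i<num_procs;i++){
          if(i==my_rank) continue;                                  /*our own buckets are never sent*/
          num_requests+=bucketspercpu_sizes[i]*requests_per_bucket; /*MATRIXBUCKETSIZEOFREQUESTS or VECTORBUCKETSIZEOFREQUESTS*/
          num_requests++;                                           /*the size message itself*/
       }
       return num_requests;
    }

Since the spawned buckets always hold exactly one row, every if(m)/if(n) guard in Bucket::Isend fires, so this budget is exact and MPI_Waitall sees precisely the number of sends actually posted.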
issm/trunk-jpl/src/c/toolkits/issm/IssmMpiVec.h
(r14731 → r14750)

     /*}}}*/
     /*FUNCTION Assemble{{{*/
-    void Assemble(void){
-       printf("not imlemented yet!");
-       exit(1);
+    void Assemble(){
+
+       int i;
+       int j;
+       int k;
+       int my_rank;
+       int num_procs;
+       int *RowRank = NULL;
+
+       DataSet **bucketspercpu = NULL;
+       int *bucketspercpu_sizes = NULL;
+       MPI_Request *requests = NULL;
+       MPI_Status *statuses = NULL;
+       MPI_Status status;
+       int num_requests = 0;
+       DataSet *mybuckets = NULL;
+       int lower_row;
+       int upper_row;
+       int count = 0;
+       int size;
+
+       /*some communicator info: */
+       num_procs=IssmComm::GetSize();
+       my_rank=IssmComm::GetRank();
+       MPI_Comm comm=IssmComm::GetComm();
+
+       /*First, make a vector of size M, which for each row between 0 and M-1, tells which cpu this row belongs to: */
+       RowRank=DetermineRowRankFromLocalSize(M,m,comm);
+
+       /*Now, sort out our dataset of buckets according to cpu ownership of rows: */
+       bucketspercpu=xNew<DataSet*>(num_procs);
+       bucketspercpu_sizes=xNew<int>(num_procs);
+
+       for(i=0;i<num_procs;i++){
+          DataSet* bucketsofcpu_i=new DataSet();
+          for (j=0;j<buckets->Size();j++){
+             Bucket<doubletype>* bucket=(Bucket<doubletype>*)buckets->GetObjectByOffset(j);
+             bucket->SpawnBucketsPerCpu(bucketsofcpu_i,i,RowRank);
+          }
+          bucketspercpu[i]=bucketsofcpu_i;
+          bucketspercpu_sizes[i]=bucketsofcpu_i->Size();
+       }
+
+       /*Recap: each cpu now has num_procs datasets of buckets. For a certain cpu j, for a given dataset i, the buckets this
+        * dataset owns correspond to rows that are owned by cpu i, not j!:*/
+
+       /*First, figure out how many requests are going to be sent by MPI_Isend. Do this a little bit better? */
+       for(i=0;i<num_procs;i++){
+          if(i!=my_rank){
+             num_requests+=bucketspercpu[i]->Size()*VECTORBUCKETSIZEOFREQUESTS; //this is to take into account all the MPI_Isend calls in each bucket.
+             num_requests++; //this is to take into account one MPI_Isend in BucketsSend.
+          }
+       }
+
+       /*Initialize arrays to track requests and statuses: */
+       requests=new MPI_Request[num_requests];
+       statuses=new MPI_Status[num_requests];
+
+       /*Now, go through all our bucketspercpu datasets, and send them to the corresponding cpus. Do not send our own buckets though!: */
+       count=0; //count requests
+       for(i=0;i<num_procs;i++){
+          if(my_rank==i){
+             for(j=0;j<num_procs;j++){
+                if(j!=i){ //only send the buckets that this cpu does not own.
+
+                   /*Go through the buckets belonging to cpu j, and send them accordingly. */
+                   DataSet* buckets=bucketspercpu[j];
+                   MPI_Isend(bucketspercpu_sizes+j,1,MPI_INT,j,1,comm,requests+count); count++; //we use bucketspercpu_sizes because we need a permanent buffer for an asynchronous send
+                   for(k=0;k<buckets->Size();k++){
+                      Bucket<doubletype>* bucket=(Bucket<doubletype>*)buckets->GetObjectByOffset(k);
+                      bucket->Isend(j,requests,&count,comm);
+                   }
+                }
+             }
+          }
+          else{
+
+             /*Receive buckets from cpu i, and add them to my own my_rank bucket list: */
+             /*First, are we receiving anything from sender_rank?*/
+             MPI_Recv(&size,1, MPI_INT,i,1, comm, &status);
+
+             /*If so, start receiving the extra buckets and plug them into our own buckets: */
+             if(size){
+                for(j=0;j<size;j++){
+                   Bucket<doubletype>* bucket=new Bucket<doubletype>();
+                   bucket->Recv(i,comm);
+                   bucketspercpu[my_rank]->AddObject(bucket);
+                }
+             }
+          }
+       }
+       /*Wait for all requests to complete: */
+       MPI_Waitall(num_requests,requests,statuses);
+
+       /*Every cpu now has a dataset of buckets in bucketspercpu[my_rank], which holds all the values
+        * local to this cpu that should be added to the global vector. Just do that: */
+       GetOwnershipBoundariesFromRange(&lower_row,&upper_row,m,comm);
+       mybuckets=bucketspercpu[my_rank];
+
+       for(i=0;i<mybuckets->Size();i++){
+          Bucket<doubletype>* bucket=(Bucket<doubletype>*)mybuckets->GetObjectByOffset(i);
+          bucket->SetLocalVectorValues(this->vector,lower_row);
+       }
+
+       /*Free resources:{{{*/
+       xDelete<int>(RowRank);
+       for(i=0;i<num_procs;i++){
+          DataSet* buckets=bucketspercpu[i];
+          delete buckets;
+       }
+       xDelete<DataSet*>(bucketspercpu);
+       xDelete<int>(bucketspercpu_sizes);
+       xDelete<MPI_Request>(requests);
+       xDelete<MPI_Status>(statuses);
+       /*}}}*/
     }
     /*}}}*/
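The whole exchange hinges on the row-ownership map built by DetermineRowRankFromLocalSize(M,m,comm). Here is a standalone sketch of what such a map plausibly computes, assuming rows are owned in contiguous blocks in rank order (consistent with the lower_row/upper_row usage above); RowRankFromLocalSize and its signature are illustrative, and the real ISSM routine may differ.

    #include <mpi.h>
    #include <vector>

    /*For each global row 0..M-1, record the rank that owns it. Each rank
     * contributes m local rows; ownership is assumed contiguous, in rank order.*/
    std::vector<int> RowRankFromLocalSize(int M,int m,MPI_Comm comm){

       int num_procs;
       MPI_Comm_size(comm,&num_procs);

       /*gather every rank's local row count*/
       std::vector<int> alllocalsizes(num_procs);
       MPI_Allgather(&m,1,MPI_INT,alllocalsizes.data(),1,MPI_INT,comm);

       /*walk the blocks: rank 0 owns the first alllocalsizes[0] rows, rank 1 the next block, etc.*/
       std::vector<int> rowrank(M);
       int row=0;
       for(int rank=0;rank<num_procs;rank++){
          for(int k=0;k<alllocalsizes[rank];k++) rowrank[row++]=rank;
       }
       return rowrank;
    }

With that map in hand, SpawnBucketsPerCpu reduces to a filter: each bucket row is re-spawned into the dataset of whichever rank the map says owns it, and Assemble only has to ship each dataset to its rank.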