Changeset 13604
- Timestamp: 10/10/12 23:06:05
- Location: issm/trunk-jpl/src/c/Container
- Files: 5 edited
issm/trunk-jpl/src/c/Container/Constraints.cpp
r12365 → r13604

   49   49     /*figure out total number of constraints combining all the cpus (no clones here)*/
   50   50     #ifdef _HAVE_MPI_
   51         -   MPI_Reduce(&localconstraints,&numberofconstraints,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD);
   52         -   MPI_Bcast(&numberofconstraints,1,MPI_INT,0,MPI_COMM_WORLD);
        51    +   MPI_Reduce(&localconstraints,&numberofconstraints,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm());
        52    +   MPI_Bcast(&numberofconstraints,1,MPI_INT,0,IssmComm::GetComm());
   53   53     #else
   54   54     numberofconstraints=localconstraints;
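Every hunk in this changeset makes the same substitution: the hardcoded MPI_COMM_WORLD communicator is swapped for whatever communicator IssmComm::GetComm() returns, so the container code runs on the communicator the library was actually given. The changeset does not show the IssmComm class itself; the sketch below is only a guess at how such a static wrapper is typically written (the class and member names are placeholders, not ISSM code):

/* Minimal sketch of a static communicator holder in the spirit of
 * IssmComm::GetComm(); CommWrapper and its members are hypothetical. */
#include <mpi.h>

class CommWrapper{
	private:
		static MPI_Comm comm;                                  /*communicator handed to the library*/
	public:
		static void     SetComm(MPI_Comm incomm){comm=incomm;}
		static MPI_Comm GetComm(void){return comm;}
};
MPI_Comm CommWrapper::comm=MPI_COMM_WORLD;                     /*default until SetComm is called*/

Call sites then read MPI_Bcast(&count,1,MPI_INT,0,CommWrapper::GetComm()); exactly as the hunks above do with IssmComm::GetComm().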
issm/trunk-jpl/src/c/Container/Elements.cpp
r13594 → r13604

  126  126     /*Synchronize across cluster, so as to not end up with different sizes for each patch on each cpu: */
  127  127     #ifdef _HAVE_MPI_
  128         -   MPI_Reduce(&numvertices,&max_numvertices,1,MPI_INT,MPI_MAX,0,MPI_COMM_WORLD);
  129         -   MPI_Bcast(&max_numvertices,1,MPI_INT,0,MPI_COMM_WORLD);
       128    +   MPI_Reduce(&numvertices,&max_numvertices,1,MPI_INT,MPI_MAX,0,IssmComm::GetComm());
       129    +   MPI_Bcast(&max_numvertices,1,MPI_INT,0,IssmComm::GetComm());
  130  130     numvertices=max_numvertices;
  131  131
  132         -   MPI_Reduce(&numnodes,&max_numnodes,1,MPI_INT,MPI_MAX,0,MPI_COMM_WORLD);
  133         -   MPI_Bcast(&max_numnodes,1,MPI_INT,0,MPI_COMM_WORLD);
       132    +   MPI_Reduce(&numnodes,&max_numnodes,1,MPI_INT,MPI_MAX,0,IssmComm::GetComm());
       133    +   MPI_Bcast(&max_numnodes,1,MPI_INT,0,IssmComm::GetComm());
  134  134     numnodes=max_numnodes;
  135  135     #endif
…
  202  202     if(this->Size()) rank=my_rank2;
  203  203     else rank=num_procs2;
  204         -   MPI_Allreduce(&rank,&minrank,1,MPI_INT,MPI_MIN,MPI_COMM_WORLD);
       204    +   MPI_Allreduce(&rank,&minrank,1,MPI_INT,MPI_MIN,IssmComm::GetComm());
  205  205     #else
  206  206     minrank=my_rank2;
…
  214  214     }
  215  215     #ifdef _HAVE_MPI_
  216         -   MPI_Bcast(&numberofresults,1,MPI_DOUBLE,minrank,MPI_COMM_WORLD);
       216    +   MPI_Bcast(&numberofresults,1,MPI_DOUBLE,minrank,IssmComm::GetComm());
  217  217     #endif
  218  218
…
  226  226     resultssteps=xNew<int>(numberofresults);
  227  227     }
  228         -   MPI_Bcast(resultsenums,numberofresults,MPI_INT,minrank,MPI_COMM_WORLD);
  229         -   MPI_Bcast(resultssizes,numberofresults,MPI_INT,minrank,MPI_COMM_WORLD);
  230         -   MPI_Bcast(resultstimes,numberofresults,MPI_DOUBLE,minrank,MPI_COMM_WORLD);
  231         -   MPI_Bcast(resultssteps,numberofresults,MPI_INT,minrank,MPI_COMM_WORLD);
       228    +   MPI_Bcast(resultsenums,numberofresults,MPI_INT,minrank,IssmComm::GetComm());
       229    +   MPI_Bcast(resultssizes,numberofresults,MPI_INT,minrank,IssmComm::GetComm());
       230    +   MPI_Bcast(resultstimes,numberofresults,MPI_DOUBLE,minrank,IssmComm::GetComm());
       231    +   MPI_Bcast(resultssteps,numberofresults,MPI_INT,minrank,IssmComm::GetComm());
  232  232     #endif
  233  233
…
  304  304     local_nelem=this->Size();
  305  305     #ifdef _HAVE_MPI_
  306         -   MPI_Allreduce((void*)&local_nelem,(void*)&numberofelements,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
       306    +   MPI_Allreduce((void*)&local_nelem,(void*)&numberofelements,1,MPI_INT,MPI_SUM,IssmComm::GetComm());
  307  307     #else
  308  308     numberofelements=local_nelem;
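The hunks around lines 202-216 use a two-step idiom: each cpu proposes its own rank if it holds elements (num_procs2 otherwise), an MPI_Allreduce with MPI_MIN picks the lowest rank that actually owns data, and that rank then broadcasts the result metadata to everyone. A standalone sketch of the same idiom, with illustrative names rather than the variables from Elements.cpp:

/* Sketch of the "lowest rank that owns data broadcasts" idiom; names are
 * illustrative, not taken from Elements.cpp. */
#include <mpi.h>

int BroadcastFromFirstOwner(int localcount,MPI_Comm comm){
	int my_rank,num_procs;
	MPI_Comm_rank(comm,&my_rank);
	MPI_Comm_size(comm,&num_procs);

	/*ranks with no data propose num_procs, so they can never win the MIN*/
	int candidate = localcount ? my_rank : num_procs;
	int minrank;
	MPI_Allreduce(&candidate,&minrank,1,MPI_INT,MPI_MIN,comm);

	/*the winning rank broadcasts its count to every other rank*/
	int count = (my_rank==minrank) ? localcount : 0;
	MPI_Bcast(&count,1,MPI_INT,minrank,comm);
	return count;
}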
issm/trunk-jpl/src/c/Container/Loads.cpp
r12365 → r13604

   64   64     /*figure out total number of loads combining all the cpus (no clones here)*/
   65   65     #ifdef _HAVE_MPI_
   66         -   MPI_Reduce(&localloads,&numberofloads,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD);
   67         -   MPI_Bcast(&numberofloads,1,MPI_INT,0,MPI_COMM_WORLD);
        66    +   MPI_Reduce(&localloads,&numberofloads,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm());
        67    +   MPI_Bcast(&numberofloads,1,MPI_INT,0,IssmComm::GetComm());
   68   68     #else
   69   69     numberofloads=localloads;
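Constraints.cpp and Loads.cpp share the same counting idiom: the per-cpu count is summed onto rank 0 with MPI_Reduce and the total is then broadcast back to every rank. The reduce-plus-broadcast pair is functionally equivalent to a single MPI_Allreduce, as in this standalone sketch (the function name is illustrative):

/* Sum a per-rank count across the communicator; equivalent to the
 * MPI_Reduce + MPI_Bcast pair used in Constraints.cpp and Loads.cpp. */
#include <mpi.h>

int TotalAcrossRanks(int localcount,MPI_Comm comm){
	int total=0;
	MPI_Allreduce(&localcount,&total,1,MPI_INT,MPI_SUM,comm);
	return total;
}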
issm/trunk-jpl/src/c/Container/Nodes.cpp
r13594 → r13604

   90   90     alldofcount=xNew<int>(num_procs2);
   91   91     #ifdef _HAVE_MPI_
   92         -   MPI_Gather(&dofcount,1,MPI_INT,alldofcount,1,MPI_INT,0,MPI_COMM_WORLD);
   93         -   MPI_Bcast(alldofcount,num_procs2,MPI_INT,0,MPI_COMM_WORLD);
        92    +   MPI_Gather(&dofcount,1,MPI_INT,alldofcount,1,MPI_INT,0,IssmComm::GetComm());
        93    +   MPI_Bcast(alldofcount,num_procs2,MPI_INT,0,IssmComm::GetComm());
   94   94     #else
   95   95     alldofcount[0]=dofcount;
…
  129  129
  130  130     #ifdef _HAVE_MPI_
  131         -   MPI_Allreduce((void*)truedofs,(void*)alltruedofs,numnodes*maxdofspernode,MPI_INT,MPI_MAX,MPI_COMM_WORLD);
       131    +   MPI_Allreduce((void*)truedofs,(void*)alltruedofs,numnodes*maxdofspernode,MPI_INT,MPI_MAX,IssmComm::GetComm());
  132  132     #else
  133  133     for(i=0;i<numnodes*maxdofspernode;i++)alltruedofs[i]=truedofs[i];
…
  174  174      * order of cpu rank. This is also why we initialized this array to num_procs.*/
  175  175     #ifdef _HAVE_MPI_
  176         -   MPI_Allreduce((void*)ranks,(void*)minranks,numnodes,MPI_INT,MPI_MIN,MPI_COMM_WORLD);
       176    +   MPI_Allreduce((void*)ranks,(void*)minranks,numnodes,MPI_INT,MPI_MIN,IssmComm::GetComm());
  177  177     #else
  178  178     for(i=0;i<numnodes;i++)minranks[i]=ranks[i];
…
  221  221     /*Grab max of all cpus: */
  222  222     #ifdef _HAVE_MPI_
  223         -   MPI_Allreduce((void*)&max,(void*)&allmax,1,MPI_INT,MPI_MAX,MPI_COMM_WORLD);
       223    +   MPI_Allreduce((void*)&max,(void*)&allmax,1,MPI_INT,MPI_MAX,IssmComm::GetComm());
  224  224     max=allmax;
  225  225     #endif
…
  255  255     /*Gather from all cpus: */
  256  256     #ifdef _HAVE_MPI_
  257         -   MPI_Allreduce((void*)&numdofs,(void*)&allnumdofs,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
       257    +   MPI_Allreduce((void*)&numdofs,(void*)&allnumdofs,1,MPI_INT,MPI_SUM,IssmComm::GetComm());
  258  258     #else
  259  259     allnumdofs=numdofs;
…
  282  282     /*Gather from all cpus: */
  283  283     #ifdef _HAVE_MPI_
  284         -   MPI_Allreduce((void*)&numnodes,(void*)&allnumnodes,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
       284    +   MPI_Allreduce((void*)&numnodes,(void*)&allnumnodes,1,MPI_INT,MPI_SUM,IssmComm::GetComm());
  285  285     #else
  286  286     allnumnodes=numnodes;
…
  312  312
  313  313     #ifdef _HAVE_MPI_
  314         -   MPI_Reduce(&max_sid,&node_max_sid,1,MPI_INT,MPI_MAX,0,MPI_COMM_WORLD);
  315         -   MPI_Bcast(&node_max_sid,1,MPI_INT,0,MPI_COMM_WORLD);
       314    +   MPI_Reduce(&max_sid,&node_max_sid,1,MPI_INT,MPI_MAX,0,IssmComm::GetComm());
       315    +   MPI_Bcast(&node_max_sid,1,MPI_INT,0,IssmComm::GetComm());
  316  316     max_sid=node_max_sid;
  317  317     #endif
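The hunk at lines 92-93 first collects each cpu's dof count on rank 0 with MPI_Gather and then broadcasts the assembled array, so every rank ends up with the counts of all ranks. A standalone sketch of that exchange, noting the single-call MPI_Allgather equivalent (names are illustrative):

/* Share one integer per rank with every rank: the two-step version used in
 * the Nodes.cpp hunk above, plus the MPI_Allgather one-liner it amounts to. */
#include <mpi.h>

void ShareCounts(int dofcount,int* alldofcount,int num_procs,MPI_Comm comm){
	/*two-step version, as in the changeset: collect on rank 0, then broadcast*/
	MPI_Gather(&dofcount,1,MPI_INT,alldofcount,1,MPI_INT,0,comm);
	MPI_Bcast(alldofcount,num_procs,MPI_INT,0,comm);

	/*single-call equivalent:*/
	/*MPI_Allgather(&dofcount,1,MPI_INT,alldofcount,1,MPI_INT,comm);*/
}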
issm/trunk-jpl/src/c/Container/Vertices.cpp
r13594 → r13604

   66   66     allpidcount=xNew<int>(num_procs2);
   67   67     #ifdef _HAVE_MPI_
   68         -   MPI_Gather(&pidcount,1,MPI_INT,allpidcount,1,MPI_INT,0,MPI_COMM_WORLD);
   69         -   MPI_Bcast(allpidcount,num_procs2,MPI_INT,0,MPI_COMM_WORLD);
        68    +   MPI_Gather(&pidcount,1,MPI_INT,allpidcount,1,MPI_INT,0,IssmComm::GetComm());
        69    +   MPI_Bcast(allpidcount,num_procs2,MPI_INT,0,IssmComm::GetComm());
   70   70     #else
   71   71     allpidcount[0]=pidcount;
…
   94   94     }
   95   95     #ifdef _HAVE_MPI_
   96         -   MPI_Allreduce((void*)truepids,(void*)alltruepids,numberofobjects,MPI_INT,MPI_MAX,MPI_COMM_WORLD);
        96    +   MPI_Allreduce((void*)truepids,(void*)alltruepids,numberofobjects,MPI_INT,MPI_MAX,IssmComm::GetComm());
   97   97     #else
   98   98     for(i=0;i<numberofobjects;i++)alltruepids[i]=truepids[i];
…
  137  137      * order of cpu rank. This is also why we initialized this array to num_procs.*/
  138  138     #ifdef _HAVE_MPI_
  139         -   MPI_Allreduce((void*)ranks,(void*)minranks,numberofobjects,MPI_INT,MPI_MIN,MPI_COMM_WORLD);
       139    +   MPI_Allreduce((void*)ranks,(void*)minranks,numberofobjects,MPI_INT,MPI_MIN,IssmComm::GetComm());
  140  140     #else
  141  141     for(i=0;i<numberofobjects;i++)minranks[i]=ranks[i];
…
  169  169
  170  170     #ifdef _HAVE_MPI_
  171         -   MPI_Reduce(&max_sid,&vertex_max_sid,1,MPI_INT,MPI_MAX,0,MPI_COMM_WORLD);
  172         -   MPI_Bcast(&vertex_max_sid,1,MPI_INT,0,MPI_COMM_WORLD);
       171    +   MPI_Reduce(&max_sid,&vertex_max_sid,1,MPI_INT,MPI_MAX,0,IssmComm::GetComm());
       172    +   MPI_Bcast(&vertex_max_sid,1,MPI_INT,0,IssmComm::GetComm());
  173  173     max_sid=vertex_max_sid;
  174  174     #endif