Changeset 13590
- Timestamp: 10/10/12 20:56:09
- Location: issm/trunk-jpl/src/c/modules
- Files: 38 edited
issm/trunk-jpl/src/c/modules/ConstraintsStatex/RiftConstraintsState.cpp
r12515 → r13590

    changed lines 35–36:
          #ifdef _HAVE_MPI_
       -  MPI_Reduce(&found,&mpi_found,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD);
       -  MPI_Bcast(&mpi_found,1,MPI_INT,0,MPI_COMM_WORLD);
       +  MPI_Reduce(&found,&mpi_found,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm());
       +  MPI_Bcast(&mpi_found,1,MPI_INT,0,IssmComm::GetComm());
          found=mpi_found;
          #endif

    changed lines 98–99:
          #ifdef _HAVE_MPI_
       -  MPI_Reduce(&num_unstable_constraints,&sum_num_unstable_constraints,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD);
       -  MPI_Bcast(&sum_num_unstable_constraints,1,MPI_INT,0,MPI_COMM_WORLD);
       +  MPI_Reduce(&num_unstable_constraints,&sum_num_unstable_constraints,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm());
       +  MPI_Bcast(&sum_num_unstable_constraints,1,MPI_INT,0,IssmComm::GetComm());
          num_unstable_constraints=sum_num_unstable_constraints;
          #endif

    changed lines 138–139:
          /*Is there just one found? that would mean we have frozen! : */
          #ifdef _HAVE_MPI_
       -  MPI_Reduce(&found,&mpi_found,1,MPI_INT,MPI_MAX,0,MPI_COMM_WORLD);
       -  MPI_Bcast(&mpi_found,1,MPI_INT,0,MPI_COMM_WORLD);
       +  MPI_Reduce(&found,&mpi_found,1,MPI_INT,MPI_MAX,0,IssmComm::GetComm());
       +  MPI_Bcast(&mpi_found,1,MPI_INT,0,IssmComm::GetComm());
          found=mpi_found;
          #endif

    changed lines 198–199:
          #ifdef _HAVE_MPI_
       -  MPI_Reduce(&found,&mpi_found,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD);
       -  MPI_Bcast(&mpi_found,1,MPI_INT,0,MPI_COMM_WORLD);
       +  MPI_Reduce(&found,&mpi_found,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm());
       +  MPI_Bcast(&mpi_found,1,MPI_INT,0,IssmComm::GetComm());
          found=mpi_found;
          #endif

    changed lines 231–232:
          #ifdef _HAVE_MPI_
       -  MPI_Reduce(&found,&mpi_found,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD);
       -  MPI_Bcast(&mpi_found,1,MPI_INT,0,MPI_COMM_WORLD);
       +  MPI_Reduce(&found,&mpi_found,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm());
       +  MPI_Bcast(&mpi_found,1,MPI_INT,0,IssmComm::GetComm());
          found=mpi_found;
          #endif

    changed lines 292–293:
          #ifdef _HAVE_MPI_
       -  MPI_Reduce(&num_unstable_constraints,&sum_num_unstable_constraints,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD);
       -  MPI_Bcast(&sum_num_unstable_constraints,1,MPI_INT,0,MPI_COMM_WORLD);
       +  MPI_Reduce(&num_unstable_constraints,&sum_num_unstable_constraints,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm());
       +  MPI_Bcast(&sum_num_unstable_constraints,1,MPI_INT,0,IssmComm::GetComm());
          num_unstable_constraints=sum_num_unstable_constraints;
          #endif

    changed lines 332–333:
          #ifdef _HAVE_MPI_
       -  MPI_Reduce(&max_penetration,&mpi_max_penetration,1,MPI_DOUBLE,MPI_MAX,0,MPI_COMM_WORLD);
       -  MPI_Bcast(&mpi_max_penetration,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
       +  MPI_Reduce(&max_penetration,&mpi_max_penetration,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm());
       +  MPI_Bcast(&mpi_max_penetration,1,MPI_DOUBLE,0,IssmComm::GetComm());
          max_penetration=mpi_max_penetration;
          #endif

    changed lines 371–372:
          #ifdef _HAVE_MPI_
       -  MPI_Reduce(&num_unstable_constraints,&sum_num_unstable_constraints,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD);
       -  MPI_Bcast(&sum_num_unstable_constraints,1,MPI_INT,0,MPI_COMM_WORLD);
       +  MPI_Reduce(&num_unstable_constraints,&sum_num_unstable_constraints,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm());
       +  MPI_Bcast(&sum_num_unstable_constraints,1,MPI_INT,0,IssmComm::GetComm());
          num_unstable_constraints=sum_num_unstable_constraints;
          #endif
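Every hunk in this changeset makes the same substitution: the hard-coded MPI_COMM_WORLD is replaced by the communicator returned by IssmComm::GetComm(), so these modules run on whatever communicator ISSM was handed rather than on all of MPI. Only the GetComm() call is visible in the diff; the sketch below is a minimal, assumed shape for such a wrapper (the SetComm() setter and the static member are illustrative, not code from this revision), plus a note that the recurring MPI_Reduce-then-MPI_Bcast idiom is equivalent to a single MPI_Allreduce on the same communicator.

    #include <mpi.h>

    /* Hypothetical communicator wrapper; only GetComm() appears in this
     * changeset, the rest is an illustrative assumption. */
    class IssmComm{
        private:
            static MPI_Comm comm;                                 /* communicator used by the whole run */
        public:
            static void     SetComm(MPI_Comm incomm){ comm=incomm; }
            static MPI_Comm GetComm(void){ return comm; }
    };
    MPI_Comm IssmComm::comm=MPI_COMM_WORLD;                       /* default until SetComm() is called */

    /* The reduce-then-broadcast pattern used throughout these modules is
     * equivalent to one MPI_Allreduce on the same communicator: */
    static int SumAcrossRanks(int found){
        int mpi_found=0;
        MPI_Allreduce(&found,&mpi_found,1,MPI_INT,MPI_SUM,IssmComm::GetComm());
        return mpi_found;
    }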
issm/trunk-jpl/src/c/modules/ConstraintsStatex/ThermalConstraintsState.cpp
r12102 → r13590 (changed lines 39–40)
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&num_unstable_constraints,&sum_num_unstable_constraints,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&sum_num_unstable_constraints,1,MPI_INT,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&num_unstable_constraints,&sum_num_unstable_constraints,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm());
   +  MPI_Bcast(&sum_num_unstable_constraints,1,MPI_INT,0,IssmComm::GetComm());
      num_unstable_constraints=sum_num_unstable_constraints;
      #endif
issm/trunk-jpl/src/c/modules/ConstraintsStatex/ThermalIsPresent.cpp
r12102 → r13590 (changed lines 31–32)
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&found,&mpi_found,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&mpi_found,1,MPI_INT,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&found,&mpi_found,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm());
   +  MPI_Bcast(&mpi_found,1,MPI_INT,0,IssmComm::GetComm());
      found=mpi_found;
      #endif
issm/trunk-jpl/src/c/modules/DragCoefficientAbsGradientx/DragCoefficientAbsGradientx.cpp
r13073 → r13590 (changed lines 30–31)
      /*Sum all J from all cpus of the cluster:*/
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm());
   +  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
      J=J_sum;
      #endif
issm/trunk-jpl/src/c/modules/ElementResponsex/ElementResponsex.cpp
r13073 → r13590

    changed line 40:
          /*Broadcast whether we found the element: */
          #ifdef _HAVE_MPI_
       -  MPI_Allreduce(&found,&sumfound,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
       +  MPI_Allreduce(&found,&sumfound,1,MPI_INT,MPI_SUM,IssmComm::GetComm());
          if(!sumfound)_error_("could not find material with id" << index << " to compute ElementResponse");
          #endif

    changed lines 51–52:
          /*Broadcast and plug into response: */
          #ifdef _HAVE_MPI_
       -  MPI_Allreduce(&cpu_found,&cpu_found,1,MPI_INT,MPI_MAX,MPI_COMM_WORLD);
       -  MPI_Bcast(&response,1,MPI_DOUBLE,cpu_found,MPI_COMM_WORLD);
       +  MPI_Allreduce(&cpu_found,&cpu_found,1,MPI_INT,MPI_MAX,IssmComm::GetComm());
       +  MPI_Bcast(&response,1,MPI_DOUBLE,cpu_found,IssmComm::GetComm());
          #endif
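ElementResponsex (and NodalValuex further down) use a two-step idiom: an MPI_Allreduce with MPI_MAX identifies the rank that owns the element, and that rank then acts as the broadcast root. Below is a minimal sketch of the idiom on an ISSM-style communicator, assuming at least one rank found the element (the earlier sumfound check guarantees that in the module) and using MPI_IN_PLACE plus illustrative names rather than the exact module code.

    #include <mpi.h>

    /* Owner-broadcast sketch: the rank that found the element ships its
     * response to everyone else. */
    static double BroadcastFromOwner(int found,double response,MPI_Comm comm){
        int my_rank; MPI_Comm_rank(comm,&my_rank);
        int cpu_found = found ? my_rank : -1;                          /* candidate owner, -1 if not found here */
        MPI_Allreduce(MPI_IN_PLACE,&cpu_found,1,MPI_INT,MPI_MAX,comm); /* highest rank that found it wins */
        MPI_Bcast(&response,1,MPI_DOUBLE,cpu_found,comm);              /* owner is the broadcast root */
        return response;
    }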
issm/trunk-jpl/src/c/modules/GroundinglineMigrationx/GroundinglineMigrationx.cpp
r13216 → r13590 (changed line 159)
      #ifdef _HAVE_MPI_
   -  MPI_Allreduce(&local_nflipped,&nflipped,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
   +  MPI_Allreduce(&local_nflipped,&nflipped,1,MPI_INT,MPI_SUM,IssmComm::GetComm());
      if(VerboseConvergence()) _pprintLine_("      Additional number of vertices allowed to unground: " << nflipped);
      #else
issm/trunk-jpl/src/c/modules/IceVolumex/IceVolumex.cpp
r13073 → r13590 (changed lines 22–23)
      }
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&local_ice_volume,&total_ice_volume,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&total_ice_volume,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&local_ice_volume,&total_ice_volume,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm());
   +  MPI_Bcast(&total_ice_volume,1,MPI_DOUBLE,0,IssmComm::GetComm());
      #else
      total_ice_volume=local_ice_volume;
issm/trunk-jpl/src/c/modules/InputConvergencex/InputConvergencex.cpp
r12515 → r13590 (changed line 33)
      /*In parallel, we need to gather the converged status: */
      #ifdef _HAVE_MPI_
   -  MPI_Allreduce((void*)&num_notconverged,(void*)&total_notconverged,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
   +  MPI_Allreduce((void*)&num_notconverged,(void*)&total_notconverged,1,MPI_INT,MPI_SUM,IssmComm::GetComm());
      num_notconverged=total_notconverged;
      #endif
issm/trunk-jpl/src/c/modules/InputUpdateFromDakotax/InputUpdateFromDakotax.cpp
r13056 → r13590 (changed lines 70–71, 73–74, 77–78, 81–82)
      #ifdef _DEBUG_
   -  PetscSynchronizedPrintf(MPI_COMM_WORLD,"Parameter matrix:");
   -  PetscSynchronizedFlush(MPI_COMM_WORLD);
   +  PetscSynchronizedPrintf(IssmComm::GetComm(),"Parameter matrix:");
   +  PetscSynchronizedFlush(IssmComm::GetComm());
      for(l=0;l<ncols;l++){
   -    PetscSynchronizedPrintf(MPI_COMM_WORLD," time %i\n",l);
   -    PetscSynchronizedFlush(MPI_COMM_WORLD);
   +    PetscSynchronizedPrintf(IssmComm::GetComm()," time %i\n",l);
   +    PetscSynchronizedFlush(IssmComm::GetComm());

        for(k=0;k<numberofvertices;k++){
   -      PetscSynchronizedPrintf(MPI_COMM_WORLD," node %i value %g\n",k+1,*(parameter+k*ncols+l));
   -      PetscSynchronizedFlush(MPI_COMM_WORLD);
   +      PetscSynchronizedPrintf(IssmComm::GetComm()," node %i value %g\n",k+1,*(parameter+k*ncols+l));
   +      PetscSynchronizedFlush(IssmComm::GetComm());
        }
      }
   -  PetscSynchronizedPrintf(MPI_COMM_WORLD," descriptor: %s root %s enum: %i\n",descriptor,root,StringToEnumx(root));
   -  PetscSynchronizedFlush(MPI_COMM_WORLD);
   +  PetscSynchronizedPrintf(IssmComm::GetComm()," descriptor: %s root %s enum: %i\n",descriptor,root,StringToEnumx(root));
   +  PetscSynchronizedFlush(IssmComm::GetComm());
      #endif
issm/trunk-jpl/src/c/modules/Krigingx/pKrigingx.cpp
r13376 → r13590

    changed lines 66–67:
          double *sumpredictions =xNew<double>(n_interp);
          double *sumerror       =xNew<double>(n_interp);
       -  MPI_Allreduce(predictions,sumpredictions,n_interp,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
       -  MPI_Allreduce(error,sumerror,n_interp,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
       +  MPI_Allreduce(predictions,sumpredictions,n_interp,MPI_DOUBLE,MPI_SUM,IssmComm::GetComm());
       +  MPI_Allreduce(error,sumerror,n_interp,MPI_DOUBLE,MPI_SUM,IssmComm::GetComm());
          xDelete<double>(error);       error=sumerror;
          xDelete<double>(predictions); predictions=sumpredictions;

    changed line 83:
          #ifdef _HAVE_MPI_
          double *sumpredictions =xNew<double>(n_interp);
       -  MPI_Allreduce(predictions,sumpredictions,n_interp,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
       +  MPI_Allreduce(predictions,sumpredictions,n_interp,MPI_DOUBLE,MPI_SUM,IssmComm::GetComm());
          xDelete<double>(predictions); predictions=sumpredictions;
          #endif

    changed line 100:
          #ifdef _HAVE_MPI_
          double *sumpredictions =xNew<double>(n_interp);
       -  MPI_Allreduce(predictions,sumpredictions,n_interp,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
       +  MPI_Allreduce(predictions,sumpredictions,n_interp,MPI_DOUBLE,MPI_SUM,IssmComm::GetComm());
          xDelete<double>(predictions); predictions=sumpredictions;
          #endif
issm/trunk-jpl/src/c/modules/MassFluxx/MassFluxx.cpp
r13483 → r13590 (changed line 62)
      #ifdef _HAVE_MPI_
   -  MPI_Allreduce((void*)&mass_flux,(void*)&all_mass_flux,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
   +  MPI_Allreduce((void*)&mass_flux,(void*)&all_mass_flux,1,MPI_DOUBLE,MPI_SUM,IssmComm::GetComm());
      mass_flux=all_mass_flux;
      #endif
issm/trunk-jpl/src/c/modules/MaxAbsVxx/MaxAbsVxx.cpp
r13073 → r13590 (changed lines 35–36)
      /*Figure out maximum across the cluster: */
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&maxabsvx,&node_maxabsvx,1,MPI_DOUBLE,MPI_MAX,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&node_maxabsvx,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&maxabsvx,&node_maxabsvx,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm());
   +  MPI_Bcast(&node_maxabsvx,1,MPI_DOUBLE,0,IssmComm::GetComm());
      maxabsvx=node_maxabsvx;
      #endif
issm/trunk-jpl/src/c/modules/MaxAbsVyx/MaxAbsVyx.cpp
r13073 → r13590 (changed lines 36–37)
      /*Figure out maximum across the cluster: */
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&maxabsvy,&node_maxabsvy,1,MPI_DOUBLE,MPI_MAX,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&node_maxabsvy,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&maxabsvy,&node_maxabsvy,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm());
   +  MPI_Bcast(&node_maxabsvy,1,MPI_DOUBLE,0,IssmComm::GetComm());
      maxabsvy=node_maxabsvy;
      #endif
issm/trunk-jpl/src/c/modules/MaxAbsVzx/MaxAbsVzx.cpp
r13073 → r13590 (changed lines 35–36)
      /*Figure out minimum across the cluster: */
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&maxabsvz,&node_maxabsvz,1,MPI_DOUBLE,MPI_MAX,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&node_maxabsvz,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&maxabsvz,&node_maxabsvz,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm());
   +  MPI_Bcast(&node_maxabsvz,1,MPI_DOUBLE,0,IssmComm::GetComm());
      maxabsvz=node_maxabsvz;
      #endif
issm/trunk-jpl/src/c/modules/MaxVelx/MaxVelx.cpp
r13073 → r13590 (changed lines 36–37)
      /*Figure out maximum across the cluster: */
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&maxvel,&node_maxvel,1,MPI_DOUBLE,MPI_MAX,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&node_maxvel,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&maxvel,&node_maxvel,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm());
   +  MPI_Bcast(&node_maxvel,1,MPI_DOUBLE,0,IssmComm::GetComm());
      maxvel=node_maxvel;
      #endif
issm/trunk-jpl/src/c/modules/MaxVxx/MaxVxx.cpp
r13073 → r13590 (changed lines 35–36)
      /*Figure out minimum across the cluster: */
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&maxvx,&node_maxvx,1,MPI_DOUBLE,MPI_MAX,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&node_maxvx,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&maxvx,&node_maxvx,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm());
   +  MPI_Bcast(&node_maxvx,1,MPI_DOUBLE,0,IssmComm::GetComm());
      maxvx=node_maxvx;
      #endif
issm/trunk-jpl/src/c/modules/MaxVyx/MaxVyx.cpp
r13073 → r13590 (changed lines 35–36)
      /*Figure out minimum across the cluster: */
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&maxvy,&node_maxvy,1,MPI_DOUBLE,MPI_MAX,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&node_maxvy,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&maxvy,&node_maxvy,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm());
   +  MPI_Bcast(&node_maxvy,1,MPI_DOUBLE,0,IssmComm::GetComm());
      maxvy=node_maxvy;
      #endif
issm/trunk-jpl/src/c/modules/MaxVzx/MaxVzx.cpp
r13073 → r13590 (changed lines 36–37)
      /*Figure out minimum across the cluster: */
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&maxvz,&node_maxvz,1,MPI_DOUBLE,MPI_MAX,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&node_maxvz,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&maxvz,&node_maxvz,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm());
   +  MPI_Bcast(&node_maxvz,1,MPI_DOUBLE,0,IssmComm::GetComm());
      maxvz=node_maxvz;
      #endif
issm/trunk-jpl/src/c/modules/MinVelx/MinVelx.cpp
r13073 → r13590 (changed lines 36–37)
      /*Figure out minimum across the cluster: */
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&minvel,&node_minvel,1,MPI_DOUBLE,MPI_MIN,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&node_minvel,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&minvel,&node_minvel,1,MPI_DOUBLE,MPI_MIN,0,IssmComm::GetComm());
   +  MPI_Bcast(&node_minvel,1,MPI_DOUBLE,0,IssmComm::GetComm());
      minvel=node_minvel;
      #endif
issm/trunk-jpl/src/c/modules/MinVxx/MinVxx.cpp
r13073 → r13590 (changed lines 35–36)
      /*Figure out minimum across the cluster: */
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&minvx,&node_minvx,1,MPI_DOUBLE,MPI_MIN,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&node_minvx,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&minvx,&node_minvx,1,MPI_DOUBLE,MPI_MIN,0,IssmComm::GetComm());
   +  MPI_Bcast(&node_minvx,1,MPI_DOUBLE,0,IssmComm::GetComm());
      minvx=node_minvx;
      #endif
issm/trunk-jpl/src/c/modules/MinVyx/MinVyx.cpp
r13073 → r13590 (changed lines 35–36)
      /*Figure out minimum across the cluster: */
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&minvy,&node_minvy,1,MPI_DOUBLE,MPI_MIN,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&node_minvy,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&minvy,&node_minvy,1,MPI_DOUBLE,MPI_MIN,0,IssmComm::GetComm());
   +  MPI_Bcast(&node_minvy,1,MPI_DOUBLE,0,IssmComm::GetComm());
      minvy=node_minvy;
      #endif
issm/trunk-jpl/src/c/modules/MinVzx/MinVzx.cpp
r13073 → r13590 (changed lines 35–36)
      /*Figure out minimum across the cluster: */
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&minvz,&node_minvz,1,MPI_DOUBLE,MPI_MIN,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&node_minvz,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&minvz,&node_minvz,1,MPI_DOUBLE,MPI_MIN,0,IssmComm::GetComm());
   +  MPI_Bcast(&node_minvz,1,MPI_DOUBLE,0,IssmComm::GetComm());
      minvz=node_minvz;
      #endif
issm/trunk-jpl/src/c/modules/NodalValuex/NodalValuex.cpp
r13056 → r13590

    changed line 39:
          /*Broadcast whether we found the element: */
          #ifdef _HAVE_MPI_
       -  MPI_Allreduce(&found,&sumfound,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
       +  MPI_Allreduce(&found,&sumfound,1,MPI_INT,MPI_SUM,IssmComm::GetComm());
          if(!sumfound)_error_("could not find element with vertex with id" << index << " to compute nodal value " << EnumToStringx(natureofdataenum));
          #endif

    changed lines 45–46:
          /*Broadcast and plug into response: */
          #ifdef _HAVE_MPI_
       -  MPI_Allreduce(&cpu_found,&cpu_found,1,MPI_INT,MPI_MAX,MPI_COMM_WORLD);
       -  MPI_Bcast(&value,1,MPI_DOUBLE,cpu_found,MPI_COMM_WORLD);
       +  MPI_Allreduce(&cpu_found,&cpu_found,1,MPI_INT,MPI_MAX,IssmComm::GetComm());
       +  MPI_Bcast(&value,1,MPI_DOUBLE,cpu_found,IssmComm::GetComm());
          #else
          value=cpu_found;
issm/trunk-jpl/src/c/modules/ParsePetscOptionsx/ParsePetscOptionsx.cpp
r13056 → r13590

    changed lines 98 and 103:
          /*Ok, broadcast to other cpus: */
          #ifdef _HAVE_MPI_
       -  MPI_Bcast(&numanalyses,1,MPI_INT,0,MPI_COMM_WORLD);
       +  MPI_Bcast(&numanalyses,1,MPI_INT,0,IssmComm::GetComm());
          if(my_rank!=0){
             analyses=xNew<IssmPDouble>(numanalyses);
             strings=xNew<char*>(numanalyses);
          }
       -  MPI_Bcast(analyses,numanalyses,MPI_DOUBLE,0,MPI_COMM_WORLD);
       +  MPI_Bcast(analyses,numanalyses,MPI_DOUBLE,0,IssmComm::GetComm());
          #endif
          for(i=0;i<numanalyses;i++){

    changed lines 112 and 114:
          if(my_rank==0)stringlength=(strlen(string)+1)*sizeof(char);
          #ifdef _HAVE_MPI_
       -  MPI_Bcast(&stringlength,1,MPI_INT,0,MPI_COMM_WORLD);
       +  MPI_Bcast(&stringlength,1,MPI_INT,0,IssmComm::GetComm());
          if(my_rank!=0)string=xNew<char>(stringlength);
       -  MPI_Bcast(string,stringlength,MPI_CHAR,0,MPI_COMM_WORLD);
       +  MPI_Bcast(string,stringlength,MPI_CHAR,0,IssmComm::GetComm());
          if(my_rank!=0)strings[i]=string;
          #endif
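The second hunk above broadcasts a string in two steps: rank 0 first broadcasts the length (including the terminating '\0'), the other ranks allocate, and the characters follow. A self-contained sketch of that idiom is shown below, with plain new[] standing in for ISSM's xNew<char> and an illustrative helper name.

    #include <mpi.h>
    #include <cstring>

    /* Two-step string broadcast: size first, payload second. */
    static void BcastString(char** pstring,MPI_Comm comm){
        int my_rank; MPI_Comm_rank(comm,&my_rank);
        int stringlength=0;
        if(my_rank==0) stringlength=(int)strlen(*pstring)+1;   /* include '\0' */
        MPI_Bcast(&stringlength,1,MPI_INT,0,comm);             /* step 1: length */
        if(my_rank!=0) *pstring=new char[stringlength];        /* receivers allocate */
        MPI_Bcast(*pstring,stringlength,MPI_CHAR,0,comm);      /* step 2: characters */
    }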
issm/trunk-jpl/src/c/modules/RheologyBbarAbsGradientx/RheologyBbarAbsGradientx.cpp
r13073 → r13590 (changed lines 30–31)
      /*Sum all J from all cpus of the cluster:*/
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm());
   +  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
      J=J_sum;
      #endif
issm/trunk-jpl/src/c/modules/Solverx/SolverxPetsc.cpp
r13056 → r13590 (changed line 104)
      /*Prepare solver*/
   -  KSPCreate(MPI_COMM_WORLD,&ksp);
   +  KSPCreate(IssmComm::GetComm(),&ksp);
      KSPSetOperators(ksp,Kff,Kff,DIFFERENT_NONZERO_PATTERN);
      KSPSetFromOptions(ksp);
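Here the change puts the PETSc Krylov solver on the ISSM communicator rather than MPI_COMM_WORLD. Below is a sketch of a solve built around that call, mirroring the pre-3.5 PETSc API visible in the diff (four-argument KSPSetOperators with DIFFERENT_NONZERO_PATTERN); Kff appears in the diff, while pf, uf and the helper name are illustrative assumptions about the rest of the routine.

    #include <petscksp.h>

    /* Sketch: solve Kff*uf = pf with a KSP created on the given communicator. */
    static void SolveOnComm(Mat Kff,Vec pf,Vec uf,MPI_Comm comm){
        KSP ksp=NULL;
        KSPCreate(comm,&ksp);                                    /* was MPI_COMM_WORLD, now IssmComm::GetComm() */
        KSPSetOperators(ksp,Kff,Kff,DIFFERENT_NONZERO_PATTERN);  /* same matrix as operator and preconditioner */
        KSPSetFromOptions(ksp);                                  /* honor -ksp_type / -pc_type options */
        KSPSolve(ksp,pf,uf);                                     /* solve the linear system */
        KSPDestroy(&ksp);
    }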
issm/trunk-jpl/src/c/modules/SurfaceAbsVelMisfitx/SurfaceAbsVelMisfitx.cpp
r13073 → r13590 (changed lines 30–31)
      /*Sum all J from all cpus of the cluster:*/
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm());
   +  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
      J=J_sum;
      #endif
issm/trunk-jpl/src/c/modules/SurfaceAreax/SurfaceAreax.cpp
r12470 → r13590 (changed lines 31–32)
      /*Sum all J from all cpus of the cluster:*/
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&S,&S_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&S_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&S,&S_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm());
   +  MPI_Bcast(&S_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
      S=S_sum;
      #endif
issm/trunk-jpl/src/c/modules/SurfaceAverageVelMisfitx/SurfaceAverageVelMisfitx.cpp
r13073 → r13590 (changed lines 34–35)
      /*Sum all J from all cpus of the cluster:*/
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm());
   +  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
      J=J_sum;
      #endif
issm/trunk-jpl/src/c/modules/SurfaceLogVelMisfitx/SurfaceLogVelMisfitx.cpp
r13073 → r13590 (changed lines 30–31)
      /*Sum all J from all cpus of the cluster:*/
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm());
   +  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
      J=J_sum;
      #endif
issm/trunk-jpl/src/c/modules/SurfaceLogVxVyMisfitx/SurfaceLogVxVyMisfitx.cpp
r13073 → r13590 (changed lines 30–31)
      /*Sum all J from all cpus of the cluster:*/
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm());
   +  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
      J=J_sum;
      #endif
issm/trunk-jpl/src/c/modules/SurfaceRelVelMisfitx/SurfaceRelVelMisfitx.cpp
r13073 → r13590 (changed lines 30–31)
      /*Sum all J from all cpus of the cluster:*/
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm());
   +  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
      J=J_sum;
      #endif
issm/trunk-jpl/src/c/modules/ThicknessAbsGradientx/ThicknessAbsGradientx.cpp
r13073 → r13590 (changed lines 30–31)
      /*Sum all J from all cpus of the cluster:*/
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm());
   +  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
      J=J_sum;
      #endif
issm/trunk-jpl/src/c/modules/ThicknessAbsMisfitx/ThicknessAbsMisfitx.cpp
r13073 → r13590 (changed lines 30–31)
      /*Sum all J from all cpus of the cluster:*/
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm());
   +  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
      J=J_sum;
      #endif
issm/trunk-jpl/src/c/modules/ThicknessAcrossGradientx/ThicknessAcrossGradientx.cpp
r13073 → r13590 (changed lines 30–31)
      /*Sum all J from all cpus of the cluster:*/
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm());
   +  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
      J=J_sum;
      #endif
issm/trunk-jpl/src/c/modules/ThicknessAlongGradientx/ThicknessAlongGradientx.cpp
r13073 → r13590 (changed lines 30–31)
      /*Sum all J from all cpus of the cluster:*/
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm());
   +  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
      J=J_sum;
      #endif
issm/trunk-jpl/src/c/modules/TimeAdaptx/TimeAdaptx.cpp
r12470 → r13590 (changed lines 35–36)
      /*Figure out minimum across the cluster: */
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&min_dt,&node_min_dt,1,MPI_DOUBLE,MPI_MIN,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&node_min_dt,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&min_dt,&node_min_dt,1,MPI_DOUBLE,MPI_MIN,0,IssmComm::GetComm());
   +  MPI_Bcast(&node_min_dt,1,MPI_DOUBLE,0,IssmComm::GetComm());
      min_dt=node_min_dt;
      #endif
issm/trunk-jpl/src/c/modules/TotalSmbx/TotalSmbx.cpp
r13073 → r13590 (changed lines 22–23)
      }
      #ifdef _HAVE_MPI_
   -  MPI_Reduce(&local_smb,&total_smb,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
   -  MPI_Bcast(&total_smb,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
   +  MPI_Reduce(&local_smb,&total_smb,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm());
   +  MPI_Bcast(&total_smb,1,MPI_DOUBLE,0,IssmComm::GetComm());
      #else
      total_smb=local_smb;