Changeset 15838
- Timestamp: 08/19/13 15:15:59
- Location: issm/trunk-jpl/src/c
- Files: 1 deleted, 52 edited
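This changeset replaces direct MPI_* calls, and the #ifdef _HAVE_MPI_ guards that surrounded them, with ISSM_MPI_* wrappers so that the same call sites compile with or without MPI. The wrapper itself lives in ./toolkits/mpi/issmmpi.h (referenced in the Makefile.am diff below), but its contents are not part of this listing. The sketch below only illustrates the general pattern such a wrapper layer follows; apart from the ISSM_MPI_* names that appear in the diffs, everything in it is an assumption rather than the actual ISSM implementation.

   /* Illustrative sketch only: the real issmmpi.h is not shown in this changeset,
    * so everything beyond the ISSM_MPI_* names used in the diffs is an assumption. */
   #ifdef _HAVE_MPI_
    #include <mpi.h>
    typedef MPI_Comm     ISSM_MPI_Comm;      /* wrapper types map one-to-one onto MPI's */
    typedef MPI_Datatype ISSM_MPI_Datatype;
    #define ISSM_MPI_COMM_WORLD MPI_COMM_WORLD
    #define ISSM_MPI_INT        MPI_INT
    #define ISSM_MPI_DOUBLE     MPI_DOUBLE
    static inline int ISSM_MPI_Bcast(void* buffer,int count,ISSM_MPI_Datatype type,int root,ISSM_MPI_Comm comm){
       return MPI_Bcast(buffer,count,type,root,comm);   /* forward to the real MPI call */
    }
   #else
    typedef int ISSM_MPI_Comm;                          /* dummy types for serial builds */
    typedef int ISSM_MPI_Datatype;
    #define ISSM_MPI_COMM_WORLD 1
    #define ISSM_MPI_INT        0
    #define ISSM_MPI_DOUBLE     0
    static inline int ISSM_MPI_Bcast(void* buffer,int count,ISSM_MPI_Datatype type,int root,ISSM_MPI_Comm comm){
       return 0;                                        /* single process: a broadcast is a no-op */
    }
   #endif

With that indirection in place, the per-call #ifdef blocks in the files below collapse to single unconditional calls, which is what most of these diffs amount to.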
issm/trunk-jpl/src/c/Makefile.am
r15836 -> r15838
@@ -173 +173 @@
  ./shared/io/Print/Print.h\
  ./shared/io/Comm/Comm.h\
- ./shared/io/Comm/CommDef.h\
  ./shared/io/Comm/IssmComm.h\
  ./shared/io/Comm/IssmComm.cpp\
@@ -763 +762 @@
  #Mpi sources {{{
  mpi_sources= ./toolkits/mpi/issmmpi.h\
- ./toolkits/mpi/issmpi.cpp\
+ ./toolkits/mpi/issmmpi.cpp\
  ./toolkits/mpi/commops/commops.h\
  ./toolkits/mpi/commops/DetermineLocalSize.cpp\
issm/trunk-jpl/src/c/analyses/DakotaSpawnCore.cpp
r15104 r15838 49 49 50 50 /*If counter==-1 on cpu0, it means that the dakota runs are done. In which case, bail out and return 0: */ 51 #ifdef _HAVE_MPI_ 52 MPI_Bcast(&counter,1,MPI_INT,0,IssmComm::GetComm()); 53 #endif 51 ISSM_MPI_Bcast(&counter,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 54 52 if(counter==-1)return 0; 55 53 … … 116 114 117 115 /*numvariables: */ 118 MPI_Bcast(&numvariables,1,MPI_INT,0,IssmComm::GetComm());116 ISSM_MPI_Bcast(&numvariables,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 119 117 120 118 /*variables:*/ 121 119 if(my_rank!=0)variables=xNew<double>(numvariables); 122 MPI_Bcast(variables,numvariables,MPI_DOUBLE,0,IssmComm::GetComm());120 ISSM_MPI_Bcast(variables,numvariables,MPI_DOUBLE,0,IssmComm::GetComm()); 123 121 124 122 /*variables_descriptors: */ … … 131 129 string_length=(strlen(string)+1)*sizeof(char); 132 130 } 133 MPI_Bcast(&string_length,1,MPI_INT,0,IssmComm::GetComm());131 ISSM_MPI_Bcast(&string_length,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 134 132 if(my_rank!=0)string=xNew<char>(string_length); 135 MPI_Bcast(string,string_length,MPI_CHAR,0,IssmComm::GetComm());133 ISSM_MPI_Bcast(string,string_length,ISSM_MPI_CHAR,0,IssmComm::GetComm()); 136 134 if(my_rank!=0)variables_descriptors[i]=string; 137 135 } 138 136 139 137 /*numresponses: */ 140 MPI_Bcast(&numresponses,1,MPI_INT,0,IssmComm::GetComm());138 ISSM_MPI_Bcast(&numresponses,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 141 139 142 140 /*Assign output pointers:*/ -
issm/trunk-jpl/src/c/analyses/EnvironmentFinalize.cpp
r14917 -> r15838
@@ -12 +12 @@
  void EnvironmentFinalize(void){

- #ifdef _HAVE_MPI_
+ int my_rank;

  /*Make sure we are all here*/
- MPI_Barrier(MPI_COMM_WORLD);
+ ISSM_MPI_Barrier(ISSM_MPI_COMM_WORLD);

  /*Print closing statement*/
- int my_rank;
- MPI_Comm_rank(MPI_COMM_WORLD,&my_rank);
+ ISSM_MPI_Comm_rank(ISSM_MPI_COMM_WORLD,&my_rank);
  if(!my_rank) printf("closing MPI\n");

  /*Finalize: */
- MPI_Finalize();
-
- #endif
+ ISSM_MPI_Finalize();
  }
issm/trunk-jpl/src/c/analyses/EnvironmentInit.cpp
r14917 -> r15838
@@ -17 +17 @@
  /*Initialize MPI environment: */
  #if defined(_HAVE_MPI_)
- MPI_Init(&argc,&argv);
- comm = MPI_COMM_WORLD;
+ ISSM_MPI_Init(&argc,&argv);
+ comm = ISSM_MPI_COMM_WORLD;
  #else
  comm = 1; //bogus number for comm, which does not exist anyway.
@@ -25 +25 @@
  /*Print Banner*/
  int my_rank = 0;
- #ifdef _HAVE_MPI_
- MPI_Comm_rank(comm,&my_rank);
- #endif
+ ISSM_MPI_Comm_rank(comm,&my_rank);
  if(!my_rank) printf("\n");
  if(!my_rank) printf("Ice Sheet System Model (%s) version %s\n",PACKAGE_NAME,PACKAGE_VERSION);
issm/trunk-jpl/src/c/classes/Constraints/Constraints.cpp
r15012 -> r15838
@@ -29 +29 @@

  /*figure out total number of constraints combining all the cpus (no clones here)*/
- #ifdef _HAVE_MPI_
- MPI_Reduce(&localconstraints,&numberofconstraints,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm() );
- MPI_Bcast(&numberofconstraints,1,MPI_INT,0,IssmComm::GetComm());
- #else
- numberofconstraints=localconstraints;
- #endif
+ ISSM_MPI_Reduce(&localconstraints,&numberofconstraints,1,ISSM_MPI_INT,ISSM_MPI_SUM,0,IssmComm::GetComm() );
+ ISSM_MPI_Bcast(&numberofconstraints,1,ISSM_MPI_INT,0,IssmComm::GetComm());

  return numberofconstraints;
issm/trunk-jpl/src/c/classes/Elements/Elements.cpp
r15375 r15838 114 114 115 115 /*Synchronize across cluster, so as to not end up with different sizes for each patch on each cpu: */ 116 #ifdef _HAVE_MPI_ 117 MPI_Reduce (&numvertices,&max_numvertices,1,MPI_INT,MPI_MAX,0,IssmComm::GetComm() ); 118 MPI_Bcast(&max_numvertices,1,MPI_INT,0,IssmComm::GetComm()); 116 ISSM_MPI_Reduce (&numvertices,&max_numvertices,1,ISSM_MPI_INT,ISSM_MPI_MAX,0,IssmComm::GetComm() ); 117 ISSM_MPI_Bcast(&max_numvertices,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 119 118 numvertices=max_numvertices; 120 119 121 MPI_Reduce (&numnodes,&max_numnodes,1,MPI_INT,MPI_MAX,0,IssmComm::GetComm() );122 MPI_Bcast(&max_numnodes,1,MPI_INT,0,IssmComm::GetComm());120 ISSM_MPI_Reduce (&numnodes,&max_numnodes,1,ISSM_MPI_INT,ISSM_MPI_MAX,0,IssmComm::GetComm() ); 121 ISSM_MPI_Bcast(&max_numnodes,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 123 122 numnodes=max_numnodes; 124 #endif125 123 126 124 /*Ok, initialize Patch object: */ … … 186 184 187 185 /*Get rank of first cpu that has results*/ 188 #ifdef _HAVE_MPI_189 186 if(this->Size()) rank=my_rank; 190 187 else rank=num_procs; 191 MPI_Allreduce (&rank,&minrank,1,MPI_INT,MPI_MIN,IssmComm::GetComm()); 192 #else 193 minrank=my_rank; 194 #endif 188 ISSM_MPI_Allreduce (&rank,&minrank,1,ISSM_MPI_INT,ISSM_MPI_MIN,IssmComm::GetComm()); 195 189 196 190 /*see what the first element of this partition has in stock (this is common to all partitions)*/ … … 200 194 element->ListResultsInfo(&resultsenums,&resultssizes,&resultstimes,&resultssteps,&numberofresults); 201 195 } 202 #ifdef _HAVE_MPI_ 203 MPI_Bcast(&numberofresults,1,MPI_DOUBLE,minrank,IssmComm::GetComm()); 204 #endif 196 ISSM_MPI_Bcast(&numberofresults,1,ISSM_MPI_DOUBLE,minrank,IssmComm::GetComm()); 205 197 206 198 /*Get out if there is no results. Otherwise broadcast info*/ 207 199 if(!numberofresults) return; 208 #ifdef _HAVE_MPI_209 200 if(my_rank!=minrank){ 210 201 resultsenums=xNew<int>(numberofresults); … … 213 204 resultssteps=xNew<int>(numberofresults); 214 205 } 215 MPI_Bcast(resultsenums,numberofresults,MPI_INT,minrank,IssmComm::GetComm()); 216 MPI_Bcast(resultssizes,numberofresults,MPI_INT,minrank,IssmComm::GetComm()); 217 MPI_Bcast(resultstimes,numberofresults,MPI_DOUBLE,minrank,IssmComm::GetComm()); 218 MPI_Bcast(resultssteps,numberofresults,MPI_INT,minrank,IssmComm::GetComm()); 219 #endif 206 ISSM_MPI_Bcast(resultsenums,numberofresults,ISSM_MPI_INT,minrank,IssmComm::GetComm()); 207 ISSM_MPI_Bcast(resultssizes,numberofresults,ISSM_MPI_INT,minrank,IssmComm::GetComm()); 208 ISSM_MPI_Bcast(resultstimes,numberofresults,ISSM_MPI_DOUBLE,minrank,IssmComm::GetComm()); 209 ISSM_MPI_Bcast(resultssteps,numberofresults,ISSM_MPI_INT,minrank,IssmComm::GetComm()); 220 210 221 211 /*Loop over all results and get nodal vector*/ … … 299 289 300 290 /*Grab max of all cpus: */ 301 #ifdef _HAVE_MPI_ 302 MPI_Allreduce((void*)&max,(void*)&allmax,1,MPI_INT,MPI_MAX,IssmComm::GetComm()); 291 ISSM_MPI_Allreduce((void*)&max,(void*)&allmax,1,ISSM_MPI_INT,ISSM_MPI_MAX,IssmComm::GetComm()); 303 292 max=allmax; 304 #endif305 293 306 294 return max; … … 314 302 315 303 local_nelem=this->Size(); 316 #ifdef _HAVE_MPI_ 317 MPI_Allreduce ( (void*)&local_nelem,(void*)&numberofelements,1,MPI_INT,MPI_SUM,IssmComm::GetComm()); 318 #else 319 numberofelements=local_nelem; 320 #endif 304 ISSM_MPI_Allreduce ( (void*)&local_nelem,(void*)&numberofelements,1,ISSM_MPI_INT,ISSM_MPI_SUM,IssmComm::GetComm()); 321 305 322 306 return numberofelements; -
issm/trunk-jpl/src/c/classes/FemModel.cpp
r15726 r15838 569 569 570 570 /*sum over all cpus*/ 571 #ifdef _HAVE_MPI_ 572 MPI_Allreduce((void*)connectivity_clone,(void*)all_connectivity_clone,numnodes,MPI_INT,MPI_SUM,IssmComm::GetComm()); 573 #endif 571 ISSM_MPI_Allreduce((void*)connectivity_clone,(void*)all_connectivity_clone,numnodes,ISSM_MPI_INT,ISSM_MPI_SUM,IssmComm::GetComm()); 574 572 xDelete<int>(connectivity_clone); 575 573 … … 965 963 966 964 /*Figure out minimum across the cluster: */ 967 #ifdef _HAVE_MPI_ 968 MPI_Reduce (&min_dt,&node_min_dt,1,MPI_DOUBLE,MPI_MIN,0,IssmComm::GetComm() ); 969 MPI_Bcast(&node_min_dt,1,MPI_DOUBLE,0,IssmComm::GetComm()); 965 ISSM_MPI_Reduce (&min_dt,&node_min_dt,1,ISSM_MPI_DOUBLE,ISSM_MPI_MIN,0,IssmComm::GetComm() ); 966 ISSM_MPI_Bcast(&node_min_dt,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 970 967 min_dt=node_min_dt; 971 #endif972 968 973 969 /*Assign output pointers:*/ … … 1018 1014 } 1019 1015 1020 #ifdef _HAVE_MPI_ 1021 MPI_Allreduce ( (void*)&mass_flux,(void*)&all_mass_flux,1,MPI_DOUBLE,MPI_SUM,IssmComm::GetComm()); 1016 ISSM_MPI_Allreduce ( (void*)&mass_flux,(void*)&all_mass_flux,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,IssmComm::GetComm()); 1022 1017 mass_flux=all_mass_flux; 1023 #endif1024 1018 1025 1019 /*Free ressources:*/ … … 1052 1046 1053 1047 /*Figure out maximum across the cluster: */ 1054 #ifdef _HAVE_MPI_ 1055 MPI_Reduce(&maxabsvx,&node_maxabsvx,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm() ); 1056 MPI_Bcast(&node_maxabsvx,1,MPI_DOUBLE,0,IssmComm::GetComm()); 1048 ISSM_MPI_Reduce(&maxabsvx,&node_maxabsvx,1,ISSM_MPI_DOUBLE,ISSM_MPI_MAX,0,IssmComm::GetComm() ); 1049 ISSM_MPI_Bcast(&node_maxabsvx,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 1057 1050 maxabsvx=node_maxabsvx; 1058 #endif1059 1051 1060 1052 /*Assign output pointers:*/ … … 1078 1070 1079 1071 /*Figure out maximum across the cluster: */ 1080 #ifdef _HAVE_MPI_ 1081 MPI_Reduce(&maxabsvy,&node_maxabsvy,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm() ); 1082 MPI_Bcast(&node_maxabsvy,1,MPI_DOUBLE,0,IssmComm::GetComm()); 1072 ISSM_MPI_Reduce(&maxabsvy,&node_maxabsvy,1,ISSM_MPI_DOUBLE,ISSM_MPI_MAX,0,IssmComm::GetComm() ); 1073 ISSM_MPI_Bcast(&node_maxabsvy,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 1083 1074 maxabsvy=node_maxabsvy; 1084 #endif1085 1075 1086 1076 /*Assign output pointers:*/ … … 1104 1094 1105 1095 /*Figure out maximum across the cluster: */ 1106 #ifdef _HAVE_MPI_ 1107 MPI_Reduce(&maxabsvz,&node_maxabsvz,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm() ); 1108 MPI_Bcast(&node_maxabsvz,1,MPI_DOUBLE,0,IssmComm::GetComm()); 1096 ISSM_MPI_Reduce(&maxabsvz,&node_maxabsvz,1,ISSM_MPI_DOUBLE,ISSM_MPI_MAX,0,IssmComm::GetComm() ); 1097 ISSM_MPI_Bcast(&node_maxabsvz,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 1109 1098 maxabsvz=node_maxabsvz; 1110 #endif1111 1099 1112 1100 /*Assign output pointers:*/ … … 1130 1118 1131 1119 /*Figure out maximum across the cluster: */ 1132 #ifdef _HAVE_MPI_ 1133 MPI_Reduce(&maxvel,&node_maxvel,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm() ); 1134 MPI_Bcast(&node_maxvel,1,MPI_DOUBLE,0,IssmComm::GetComm()); 1120 ISSM_MPI_Reduce(&maxvel,&node_maxvel,1,ISSM_MPI_DOUBLE,ISSM_MPI_MAX,0,IssmComm::GetComm() ); 1121 ISSM_MPI_Bcast(&node_maxvel,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 1135 1122 maxvel=node_maxvel; 1136 #endif1137 1123 1138 1124 /*Assign output pointers:*/ … … 1156 1142 1157 1143 /*Figure out maximum across the cluster: */ 1158 #ifdef _HAVE_MPI_ 1159 MPI_Reduce(&maxvx,&node_maxvx,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm() ); 1160 MPI_Bcast(&node_maxvx,1,MPI_DOUBLE,0,IssmComm::GetComm()); 1144 
ISSM_MPI_Reduce(&maxvx,&node_maxvx,1,ISSM_MPI_DOUBLE,ISSM_MPI_MAX,0,IssmComm::GetComm() ); 1145 ISSM_MPI_Bcast(&node_maxvx,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 1161 1146 maxvx=node_maxvx; 1162 #endif1163 1147 1164 1148 /*Assign output pointers:*/ … … 1182 1166 1183 1167 /*Figure out maximum across the cluster: */ 1184 #ifdef _HAVE_MPI_ 1185 MPI_Reduce(&maxvy,&node_maxvy,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm() ); 1186 MPI_Bcast(&node_maxvy,1,MPI_DOUBLE,0,IssmComm::GetComm()); 1168 ISSM_MPI_Reduce(&maxvy,&node_maxvy,1,ISSM_MPI_DOUBLE,ISSM_MPI_MAX,0,IssmComm::GetComm() ); 1169 ISSM_MPI_Bcast(&node_maxvy,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 1187 1170 maxvy=node_maxvy; 1188 #endif1189 1171 1190 1172 /*Assign output pointers:*/ … … 1208 1190 1209 1191 /*Figure out maximum across the cluster: */ 1210 #ifdef _HAVE_MPI_ 1211 MPI_Reduce(&maxvz,&node_maxvz,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm() ); 1212 MPI_Bcast(&node_maxvz,1,MPI_DOUBLE,0,IssmComm::GetComm()); 1192 ISSM_MPI_Reduce(&maxvz,&node_maxvz,1,ISSM_MPI_DOUBLE,ISSM_MPI_MAX,0,IssmComm::GetComm() ); 1193 ISSM_MPI_Bcast(&node_maxvz,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 1213 1194 maxvz=node_maxvz; 1214 #endif1215 1195 1216 1196 /*Assign output pointers:*/ … … 1234 1214 1235 1215 /*Figure out minimum across the cluster: */ 1236 #ifdef _HAVE_MPI_ 1237 MPI_Reduce(&minvel,&node_minvel,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm() ); 1238 MPI_Bcast(&node_minvel,1,MPI_DOUBLE,0,IssmComm::GetComm()); 1216 ISSM_MPI_Reduce(&minvel,&node_minvel,1,ISSM_MPI_DOUBLE,ISSM_MPI_MAX,0,IssmComm::GetComm() ); 1217 ISSM_MPI_Bcast(&node_minvel,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 1239 1218 minvel=node_minvel; 1240 #endif1241 1219 1242 1220 /*Assign output pointers:*/ … … 1260 1238 1261 1239 /*Figure out minimum across the cluster: */ 1262 #ifdef _HAVE_MPI_ 1263 MPI_Reduce(&minvx,&node_minvx,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm() ); 1264 MPI_Bcast(&node_minvx,1,MPI_DOUBLE,0,IssmComm::GetComm()); 1240 ISSM_MPI_Reduce(&minvx,&node_minvx,1,ISSM_MPI_DOUBLE,ISSM_MPI_MAX,0,IssmComm::GetComm() ); 1241 ISSM_MPI_Bcast(&node_minvx,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 1265 1242 minvx=node_minvx; 1266 #endif1267 1243 1268 1244 /*Assign output pointers:*/ … … 1286 1262 1287 1263 /*Figure out minimum across the cluster: */ 1288 #ifdef _HAVE_MPI_ 1289 MPI_Reduce(&minvy,&node_minvy,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm() ); 1290 MPI_Bcast(&node_minvy,1,MPI_DOUBLE,0,IssmComm::GetComm()); 1264 ISSM_MPI_Reduce(&minvy,&node_minvy,1,ISSM_MPI_DOUBLE,ISSM_MPI_MAX,0,IssmComm::GetComm() ); 1265 ISSM_MPI_Bcast(&node_minvy,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 1291 1266 minvy=node_minvy; 1292 #endif1293 1267 1294 1268 /*Assign output pointers:*/ … … 1312 1286 1313 1287 /*Figure out minimum across the cluster: */ 1314 #ifdef _HAVE_MPI_ 1315 MPI_Reduce(&minvz,&node_minvz,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm() ); 1316 MPI_Bcast(&node_minvz,1,MPI_DOUBLE,0,IssmComm::GetComm()); 1288 ISSM_MPI_Reduce(&minvz,&node_minvz,1,ISSM_MPI_DOUBLE,ISSM_MPI_MAX,0,IssmComm::GetComm() ); 1289 ISSM_MPI_Bcast(&node_minvz,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 1317 1290 minvz=node_minvz; 1318 #endif1319 1291 1320 1292 /*Assign output pointers:*/ … … 1331 1303 local_smb+=element->TotalSmb(); 1332 1304 } 1333 #ifdef _HAVE_MPI_ 1334 MPI_Reduce(&local_smb,&total_smb,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm() ); 1335 MPI_Bcast(&total_smb,1,MPI_DOUBLE,0,IssmComm::GetComm()); 1336 #else 1337 total_smb=local_smb; 1338 #endif 1305 
ISSM_MPI_Reduce(&local_smb,&total_smb,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,0,IssmComm::GetComm() ); 1306 ISSM_MPI_Bcast(&total_smb,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 1339 1307 1340 1308 /*Assign output pointers: */ … … 1351 1319 local_ice_volume+=element->IceVolume(); 1352 1320 } 1353 #ifdef _HAVE_MPI_ 1354 MPI_Reduce(&local_ice_volume,&total_ice_volume,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm() ); 1355 MPI_Bcast(&total_ice_volume,1,MPI_DOUBLE,0,IssmComm::GetComm()); 1356 #else 1357 total_ice_volume=local_ice_volume; 1358 #endif 1321 ISSM_MPI_Reduce(&local_ice_volume,&total_ice_volume,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,0,IssmComm::GetComm() ); 1322 ISSM_MPI_Bcast(&total_ice_volume,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 1359 1323 1360 1324 /*Assign output pointers: */ … … 1386 1350 1387 1351 /*Broadcast whether we found the element: */ 1388 #ifdef _HAVE_MPI_ 1389 MPI_Allreduce ( &found,&sumfound,1,MPI_INT,MPI_SUM,IssmComm::GetComm()); 1352 ISSM_MPI_Allreduce ( &found,&sumfound,1,ISSM_MPI_INT,ISSM_MPI_SUM,IssmComm::GetComm()); 1390 1353 if(!sumfound)_error_("could not find material with id" << index << " to compute ElementResponse"); 1391 #endif1392 1354 1393 1355 /*Ok, we found the element, compute responseocity: */ … … 1397 1359 1398 1360 /*Broadcast and plug into response: */ 1399 #ifdef _HAVE_MPI_ 1400 MPI_Allreduce ( &cpu_found,&cpu_found,1,MPI_INT,MPI_MAX,IssmComm::GetComm()); 1401 MPI_Bcast(&response,1,MPI_DOUBLE,cpu_found,IssmComm::GetComm()); 1402 #endif 1361 ISSM_MPI_Allreduce ( &cpu_found,&cpu_found,1,ISSM_MPI_INT,ISSM_MPI_MAX,IssmComm::GetComm()); 1362 ISSM_MPI_Bcast(&response,1,ISSM_MPI_DOUBLE,cpu_found,IssmComm::GetComm()); 1403 1363 1404 1364 /*Assign output pointers: */ … … 1417 1377 J+=element->BalancethicknessMisfit(weight_index); 1418 1378 } 1419 #ifdef _HAVE_MPI_ 1420 MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm() ); 1421 MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm()); 1379 ISSM_MPI_Reduce (&J,&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,0,IssmComm::GetComm() ); 1380 ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 1422 1381 J=J_sum; 1423 #endif1424 1382 1425 1383 /*Assign output pointers: */ … … 1444 1402 1445 1403 /*Sum all J from all cpus of the cluster:*/ 1446 #ifdef _HAVE_MPI_ 1447 MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm() ); 1448 MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm()); 1404 ISSM_MPI_Reduce (&J,&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,0,IssmComm::GetComm() ); 1405 ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 1449 1406 J=J_sum; 1450 #endif1451 1407 1452 1408 /*Assign output pointers: */ … … 1653 1609 } 1654 1610 xDelete<IssmDouble>(serial_active); 1655 #ifdef _HAVE_MPI_1656 1611 int sum_counter; 1657 MPI_Reduce(&counter,&sum_counter,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm() );1658 MPI_Bcast(&sum_counter,1,MPI_INT,0,IssmComm::GetComm());1612 ISSM_MPI_Reduce(&counter,&sum_counter,1,ISSM_MPI_INT,ISSM_MPI_SUM,0,IssmComm::GetComm() ); 1613 ISSM_MPI_Bcast(&sum_counter,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 1659 1614 counter=sum_counter; 1660 #endif1661 1615 if(VerboseSolution()) _printf0_(" Number of active nodes in EPL layer: "<< counter <<"\n"); 1662 1616 -
issm/trunk-jpl/src/c/classes/IndependentObject.cpp
r15643 r15838 111 111 scalar<<=pscalar; 112 112 113 #ifdef _HAVE_MPI_ 114 MPI_Bcast(&scalar,1,MPI_DOUBLE,0,IssmComm::GetComm()); 115 #endif 113 ISSM_MPI_Bcast(&scalar,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 116 114 117 115 /*Ok, we are almost done. scalar is now an independent variable. We don't want this variable to be fetched again in the … … 145 143 if(fread(&M,sizeof(int),1,fid)!=1) _error_("could not read number of rows for matrix "); 146 144 } 147 #ifdef _HAVE_MPI_ 148 MPI_Bcast(&M,1,MPI_INT,0,IssmComm::GetComm()); 149 #endif 145 ISSM_MPI_Bcast(&M,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 150 146 151 147 if(my_rank==0){ 152 148 if(fread(&N,sizeof(int),1,fid)!=1) _error_("could not read number of columns for matrix "); 153 149 } 154 #ifdef _HAVE_MPI_ 155 MPI_Bcast(&N,1,MPI_INT,0,IssmComm::GetComm()); 156 #endif 150 ISSM_MPI_Bcast(&N,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 157 151 158 152 /*Now allocate matrix: */ … … 168 162 for (int i=0;i<M*N;++i) matrix[i]<<=buffer[i]; /*we use the <<= ADOLC overloaded operator to declare the independency*/ 169 163 } 170 #ifdef _HAVE_MPI_ 171 MPI_Bcast(matrix,M*N,MPI_DOUBLE,0,IssmComm::GetComm()); 172 #endif 164 ISSM_MPI_Bcast(matrix,M*N,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 173 165 174 166 xDelete<IssmPDouble>(buffer); -
issm/trunk-jpl/src/c/classes/IoModel.cpp
r15749 r15838 343 343 /*Ok, we have reached the end of the file. break: */ 344 344 record_code=0; //0 means bailout 345 #ifdef _HAVE_MPI_ 346 MPI_Bcast(&record_code,1,MPI_INT,0,IssmComm::GetComm()); /*tell others cpus we are bailing: */ 347 #endif 345 ISSM_MPI_Bcast(&record_code,1,ISSM_MPI_INT,0,IssmComm::GetComm()); /*tell others cpus we are bailing: */ 348 346 break; 349 347 } … … 354 352 if(fread(&record_code ,sizeof(int),1,this->fid)!=1) _error_("Cound not read record_code"); 355 353 356 #ifdef _HAVE_MPI_357 354 /*Tell other cpus what we are doing: */ 358 MPI_Bcast(&record_code,1,MPI_INT,0,IssmComm::GetComm()); /*tell other cpus what we are going to do: */355 ISSM_MPI_Bcast(&record_code,1,ISSM_MPI_INT,0,IssmComm::GetComm()); /*tell other cpus what we are going to do: */ 359 356 360 357 /*Tell other cpus the name of the data, then branch according to the data type: */ 361 MPI_Bcast(&record_enum,1,MPI_INT,0,IssmComm::GetComm()); 362 MPI_Bcast(&record_length,1,MPI_INT,0,IssmComm::GetComm()); 363 #endif 358 ISSM_MPI_Bcast(&record_enum,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 359 ISSM_MPI_Bcast(&record_length,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 364 360 365 361 switch(record_code){ … … 367 363 /*Read the boolean and broadcast it to other cpus:*/ 368 364 if(fread(&booleanint,sizeof(int),1,this->fid)!=1) _error_("could not read boolean "); 369 #ifdef _HAVE_MPI_ 370 MPI_Bcast(&booleanint,1,MPI_INT,0,IssmComm::GetComm()); 371 #endif 365 ISSM_MPI_Bcast(&booleanint,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 372 366 373 367 /*create BoolParam: */ … … 378 372 /*Read the integer and broadcast it to other cpus:*/ 379 373 if(fread(&integer,sizeof(int),1,this->fid)!=1) _error_("could not read integer "); 380 #ifdef _HAVE_MPI_ 381 MPI_Bcast(&integer,1,MPI_INT,0,IssmComm::GetComm()); 382 #endif 374 ISSM_MPI_Bcast(&integer,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 383 375 384 376 /*create IntParam: */ … … 395 387 else{ 396 388 if(fread(&pscalar,sizeof(IssmPDouble),1,this->fid)!=1) _error_("could not read scalar "); 397 #ifdef _HAVE_MPI_ 398 MPI_Bcast(&pscalar,1,MPI_DOUBLE,0,IssmComm::GetComm()); 399 #endif 389 ISSM_MPI_Bcast(&pscalar,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 400 390 scalar=reCast<IssmDouble>(pscalar); 401 391 } … … 408 398 /*We have to read a string from disk. 
First read the dimensions of the string, then the string: */ 409 399 if(fread(&string_size,sizeof(int),1,this->fid)!=1) _error_("could not read length of string "); 410 #ifdef _HAVE_MPI_ 411 MPI_Bcast(&string_size,1,MPI_INT,0,IssmComm::GetComm()); 412 #endif 400 ISSM_MPI_Bcast(&string_size,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 413 401 414 402 if(string_size){ … … 418 406 /*Read string, then broadcast: */ 419 407 if(fread(string,string_size*sizeof(char),1,this->fid)!=1)_error_(" could not read string "); 420 #ifdef _HAVE_MPI_ 421 MPI_Bcast(string,string_size,MPI_CHAR,0,IssmComm::GetComm()); 422 #endif 408 ISSM_MPI_Bcast(string,string_size,ISSM_MPI_CHAR,0,IssmComm::GetComm()); 423 409 } 424 410 else{ … … 474 460 } 475 461 } //}}} 476 #ifdef _HAVE_MPI_477 462 else{ //cpu ~0 {{{ 478 463 for(;;){ //wait on cpu 0 479 MPI_Bcast(&record_code,1,MPI_INT,0,IssmComm::GetComm()); /*get from cpu 0 what we are going to do: */464 ISSM_MPI_Bcast(&record_code,1,ISSM_MPI_INT,0,IssmComm::GetComm()); /*get from cpu 0 what we are going to do: */ 480 465 if(record_code==0){ 481 466 break; //we are done, break from the loop 482 467 } 483 468 else{ 484 MPI_Bcast(&record_enum,1,MPI_INT,0,IssmComm::GetComm()); //get from cpu 0 name of the data485 MPI_Bcast(&record_length,1,MPI_INT,0,IssmComm::GetComm());469 ISSM_MPI_Bcast(&record_enum,1,ISSM_MPI_INT,0,IssmComm::GetComm()); //get from cpu 0 name of the data 470 ISSM_MPI_Bcast(&record_length,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 486 471 switch(record_code){ 487 472 case 1: 488 473 /*boolean. get it from cpu 0 */ 489 MPI_Bcast(&booleanint,1,MPI_INT,0,IssmComm::GetComm());474 ISSM_MPI_Bcast(&booleanint,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 490 475 491 476 /*create BoolParam: */ … … 495 480 case 2: 496 481 /*integer. get it from cpu 0 */ 497 MPI_Bcast(&integer,1,MPI_INT,0,IssmComm::GetComm());482 ISSM_MPI_Bcast(&integer,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 498 483 499 484 /*create IntParam: */ … … 503 488 case 3: 504 489 /*scalar. 
get it from cpu 0 */ 505 MPI_Bcast(&scalar,1,MPI_DOUBLE,0,IssmComm::GetComm());490 ISSM_MPI_Bcast(&scalar,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 506 491 507 492 /*create DoubleParam: */ … … 510 495 break; 511 496 case 4: 512 MPI_Bcast(&string_size,1,MPI_INT,0,IssmComm::GetComm());497 ISSM_MPI_Bcast(&string_size,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 513 498 if(string_size){ 514 499 string=xNew<char>((string_size+1)); … … 516 501 517 502 /*Read string from cpu 0: */ 518 MPI_Bcast(string,string_size,MPI_CHAR,0,IssmComm::GetComm());503 ISSM_MPI_Bcast(string,string_size,ISSM_MPI_CHAR,0,IssmComm::GetComm()); 519 504 } 520 505 else{ … … 543 528 } 544 529 } //}}} 545 #endif546 530 } 547 531 /*}}}*/ … … 567 551 if(fread(&booleanint,sizeof(int),1,fid)!=1) _error_("could not read boolean "); 568 552 } 569 #ifdef _HAVE_MPI_ 570 MPI_Bcast(&booleanint,1,MPI_INT,0,IssmComm::GetComm()); 571 #endif 553 ISSM_MPI_Bcast(&booleanint,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 572 554 573 555 /*cast to bool: */ … … 599 581 } 600 582 601 #ifdef _HAVE_MPI_ 602 MPI_Bcast(&integer,1,MPI_INT,0,IssmComm::GetComm()); 603 #endif 583 ISSM_MPI_Bcast(&integer,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 604 584 605 585 /*Assign output pointers: */ … … 628 608 if(fread(&scalar,sizeof(IssmPDouble),1,fid)!=1)_error_("could not read scalar "); 629 609 } 630 #ifdef _HAVE_MPI_ 631 MPI_Bcast(&scalar,1,MPI_DOUBLE,0,IssmComm::GetComm()); 632 #endif 610 ISSM_MPI_Bcast(&scalar,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 633 611 634 612 /*Assign output pointers: */ … … 662 640 } 663 641 664 #ifdef _HAVE_MPI_ 665 MPI_Bcast(&string_size,1,MPI_INT,0,IssmComm::GetComm()); 666 #endif 642 ISSM_MPI_Bcast(&string_size,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 667 643 668 644 /*Now allocate string: */ … … 675 651 if(fread(string,string_size*sizeof(char),1,fid)!=1)_error_(" could not read string "); 676 652 } 677 #ifdef _HAVE_MPI_ 678 MPI_Bcast(string,string_size,MPI_CHAR,0,IssmComm::GetComm()); 679 #endif 653 ISSM_MPI_Bcast(string,string_size,ISSM_MPI_CHAR,0,IssmComm::GetComm()); 680 654 } 681 655 else{ … … 716 690 } 717 691 718 #ifdef _HAVE_MPI_ 719 MPI_Bcast(&M,1,MPI_INT,0,IssmComm::GetComm()); 720 #endif 692 ISSM_MPI_Bcast(&M,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 721 693 722 694 if(my_rank==0){ 723 695 if(fread(&N,sizeof(int),1,fid)!=1) _error_("could not read number of columns for matrix "); 724 696 } 725 #ifdef _HAVE_MPI_ 726 MPI_Bcast(&N,1,MPI_INT,0,IssmComm::GetComm()); 727 #endif 697 ISSM_MPI_Bcast(&N,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 728 698 729 699 /*Now allocate matrix: */ … … 736 706 } 737 707 738 #ifdef _HAVE_MPI_ 739 MPI_Bcast(matrix,M*N,MPI_DOUBLE,0,IssmComm::GetComm()); 740 #endif 708 ISSM_MPI_Bcast(matrix,M*N,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 741 709 } 742 710 … … 787 755 if(fread(&M,sizeof(int),1,fid)!=1) _error_("could not read number of rows for matrix "); 788 756 } 789 #ifdef _HAVE_MPI_ 790 MPI_Bcast(&M,1,MPI_INT,0,IssmComm::GetComm()); 791 #endif 757 ISSM_MPI_Bcast(&M,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 792 758 793 759 if(my_rank==0){ 794 760 if(fread(&N,sizeof(int),1,fid)!=1) _error_("could not read number of columns for matrix "); 795 761 } 796 #ifdef _HAVE_MPI_ 797 MPI_Bcast(&N,1,MPI_INT,0,IssmComm::GetComm()); 798 #endif 762 ISSM_MPI_Bcast(&N,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 799 763 800 764 /*Now allocate matrix: */ … … 806 770 if(fread(matrix,M*N*sizeof(IssmPDouble),1,fid)!=1) _error_("could not read matrix "); 807 771 } 808 #ifdef _HAVE_MPI_ 809 MPI_Bcast(matrix,M*N,MPI_DOUBLE,0,IssmComm::GetComm()); 810 
#endif 772 ISSM_MPI_Bcast(matrix,M*N,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 811 773 812 774 _assert_(this->independents); … … 857 819 if(fread(&numstrings,sizeof(int),1,fid)!=1) _error_("could not read length of string array"); 858 820 } 859 #ifdef _HAVE_MPI_ 860 MPI_Bcast(&numstrings,1,MPI_INT,0,IssmComm::GetComm()); 861 #endif 821 ISSM_MPI_Bcast(&numstrings,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 862 822 863 823 /*Now allocate string array: */ … … 872 832 if(fread(&string_size,sizeof(int),1,fid)!=1) _error_("could not read length of string "); 873 833 } 874 #ifdef _HAVE_MPI_ 875 MPI_Bcast(&string_size,1,MPI_INT,0,IssmComm::GetComm()); 876 #endif 834 ISSM_MPI_Bcast(&string_size,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 877 835 if(string_size){ 878 836 string=xNew<char>((string_size+1)); … … 883 841 if(fread(string,string_size*sizeof(char),1,fid)!=1)_error_(" could not read string "); 884 842 } 885 #ifdef _HAVE_MPI_ 886 MPI_Bcast(string,string_size,MPI_CHAR,0,IssmComm::GetComm()); 887 #endif 843 ISSM_MPI_Bcast(string,string_size,ISSM_MPI_CHAR,0,IssmComm::GetComm()); 888 844 } 889 845 else{ … … 930 886 if(fread(&numrecords,sizeof(int),1,fid)!=1) _error_("could not read number of records in matrix array "); 931 887 } 932 #ifdef _HAVE_MPI_ 933 MPI_Bcast(&numrecords,1,MPI_INT,0,IssmComm::GetComm()); 934 #endif 888 ISSM_MPI_Bcast(&numrecords,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 935 889 936 890 if(numrecords){ … … 953 907 if(fread(&M,sizeof(int),1,fid)!=1) _error_("could not read number of rows in " << i << "th matrix of matrix array"); 954 908 } 955 #ifdef _HAVE_MPI_ 956 MPI_Bcast(&M,1,MPI_INT,0,IssmComm::GetComm()); 957 #endif 909 ISSM_MPI_Bcast(&M,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 958 910 959 911 if(my_rank==0){ 960 912 if(fread(&N,sizeof(int),1,fid)!=1) _error_("could not read number of columns in " << i << "th matrix of matrix array"); 961 913 } 962 #ifdef _HAVE_MPI_ 963 MPI_Bcast(&N,1,MPI_INT,0,IssmComm::GetComm()); 964 #endif 914 ISSM_MPI_Bcast(&N,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 965 915 966 916 /*Now allocate matrix: */ … … 973 923 } 974 924 975 #ifdef _HAVE_MPI_ 976 MPI_Bcast(matrix,M*N,MPI_DOUBLE,0,IssmComm::GetComm()); 977 #endif 925 ISSM_MPI_Bcast(matrix,M*N,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 978 926 matrices[i]=xNew<IssmDouble>(M*N); 979 927 for (int j=0;j<M*N;++j) {matrices[i][j]=matrix[j];} … … 1307 1255 } 1308 1256 /*Broadcast code and vector type: */ 1309 #ifdef _HAVE_MPI_ 1310 MPI_Bcast(&lastindex,1,MPI_INT,0,IssmComm::GetComm()); 1311 #endif 1257 ISSM_MPI_Bcast(&lastindex,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 1312 1258 1313 1259 /*Assign output pointers:*/ … … 1365 1311 } 1366 1312 } 1367 #ifdef _HAVE_MPI_ 1368 MPI_Bcast(&found,1,MPI_INT,0,IssmComm::GetComm()); 1313 ISSM_MPI_Bcast(&found,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 1369 1314 if(!found)_error_("could not find data with name " << EnumToStringx(data_enum) << " in binary file"); 1370 #endif1371 1315 1372 1316 /*Broadcast code and vector type: */ 1373 #ifdef _HAVE_MPI_ 1374 MPI_Bcast(&record_code,1,MPI_INT,0,IssmComm::GetComm()); 1375 MPI_Bcast(&vector_type,1,MPI_INT,0,IssmComm::GetComm()); 1376 if(record_code==5) MPI_Bcast(&vector_type,1,MPI_INT,0,IssmComm::GetComm()); 1377 #endif 1317 ISSM_MPI_Bcast(&record_code,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 1318 ISSM_MPI_Bcast(&vector_type,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 1319 if(record_code==5) ISSM_MPI_Bcast(&vector_type,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 1378 1320 1379 1321 /*Assign output pointers:*/ -
issm/trunk-jpl/src/c/classes/Loads/Loads.cpp
r15012 r15838 67 67 68 68 /*Grab sum of all cpus: */ 69 #ifdef _HAVE_MPI_ 70 MPI_Allreduce((void*)&ispenalty,(void*)&allispenalty,1,MPI_INT,MPI_SUM,IssmComm::GetComm()); 69 ISSM_MPI_Allreduce((void*)&ispenalty,(void*)&allispenalty,1,ISSM_MPI_INT,ISSM_MPI_SUM,IssmComm::GetComm()); 71 70 ispenalty=allispenalty; 72 #endif73 71 74 72 if(ispenalty) … … 96 94 97 95 /*Grab max of all cpus: */ 98 #ifdef _HAVE_MPI_ 99 MPI_Allreduce((void*)&max,(void*)&allmax,1,MPI_INT,MPI_MAX,IssmComm::GetComm()); 96 ISSM_MPI_Allreduce((void*)&max,(void*)&allmax,1,ISSM_MPI_INT,ISSM_MPI_MAX,IssmComm::GetComm()); 100 97 max=allmax; 101 #endif102 98 103 99 return max; … … 114 110 115 111 /*figure out total number of loads combining all the cpus (no clones here)*/ 116 #ifdef _HAVE_MPI_ 117 MPI_Reduce(&localloads,&numberofloads,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm() ); 118 MPI_Bcast(&numberofloads,1,MPI_INT,0,IssmComm::GetComm()); 119 #else 120 numberofloads=localloads; 121 #endif 112 ISSM_MPI_Reduce(&localloads,&numberofloads,1,ISSM_MPI_INT,ISSM_MPI_SUM,0,IssmComm::GetComm() ); 113 ISSM_MPI_Bcast(&numberofloads,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 122 114 123 115 return numberofloads; … … 140 132 141 133 /*figure out total number of loads combining all the cpus (no clones here)*/ 142 #ifdef _HAVE_MPI_ 143 MPI_Reduce(&localloads,&numberofloads,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm() ); 144 MPI_Bcast(&numberofloads,1,MPI_INT,0,IssmComm::GetComm()); 145 #else 146 numberofloads=localloads; 147 #endif 134 ISSM_MPI_Reduce(&localloads,&numberofloads,1,ISSM_MPI_INT,ISSM_MPI_SUM,0,IssmComm::GetComm() ); 135 ISSM_MPI_Bcast(&numberofloads,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 148 136 149 137 return numberofloads; -
issm/trunk-jpl/src/c/classes/Nodes.cpp
r15639 r15838 65 65 * First: get number of dofs for each cpu*/ 66 66 alldofcount=xNew<int>(num_procs); 67 #ifdef _HAVE_MPI_ 68 MPI_Gather(&dofcount,1,MPI_INT,alldofcount,1,MPI_INT,0,IssmComm::GetComm()); 69 MPI_Bcast(alldofcount,num_procs,MPI_INT,0,IssmComm::GetComm()); 70 #else 71 alldofcount[0]=dofcount; 72 #endif 67 ISSM_MPI_Gather(&dofcount,1,ISSM_MPI_INT,alldofcount,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 68 ISSM_MPI_Bcast(alldofcount,num_procs,ISSM_MPI_INT,0,IssmComm::GetComm()); 73 69 74 70 /* Every cpu should start its own dof count at the end of the dofcount from cpu-1*/ … … 102 98 } 103 99 104 #ifdef _HAVE_MPI_ 105 MPI_Allreduce((void*)truedofs,(void*)alltruedofs,numnodes*maxdofspernode,MPI_INT,MPI_MAX,IssmComm::GetComm()); 106 #else 107 for(i=0;i<numnodes*maxdofspernode;i++)alltruedofs[i]=truedofs[i]; 108 #endif 100 ISSM_MPI_Allreduce((void*)truedofs,(void*)alltruedofs,numnodes*maxdofspernode,ISSM_MPI_INT,ISSM_MPI_MAX,IssmComm::GetComm()); 109 101 110 102 /* Now every cpu knows the true dofs of everyone else that is not a clone*/ … … 147 139 * dealt with by another cpu. We take the minimum because we are going to manage dof assignment in increasing 148 140 * order of cpu rank. This is also why we initialized this array to num_procs.*/ 149 #ifdef _HAVE_MPI_ 150 MPI_Allreduce((void*)ranks,(void*)minranks,numnodes,MPI_INT,MPI_MIN,IssmComm::GetComm()); 151 #else 152 for(i=0;i<numnodes;i++)minranks[i]=ranks[i]; 153 #endif 141 ISSM_MPI_Allreduce((void*)ranks,(void*)minranks,numnodes,ISSM_MPI_INT,ISSM_MPI_MIN,IssmComm::GetComm()); 154 142 155 143 /*Now go through all objects, and use minranks to flag which objects are cloned: */ … … 192 180 193 181 /*Grab max of all cpus: */ 194 #ifdef _HAVE_MPI_ 195 MPI_Allreduce((void*)&max,(void*)&allmax,1,MPI_INT,MPI_MAX,IssmComm::GetComm()); 182 ISSM_MPI_Allreduce((void*)&max,(void*)&allmax,1,ISSM_MPI_INT,ISSM_MPI_MAX,IssmComm::GetComm()); 196 183 max=allmax; 197 #endif198 184 199 185 return max; … … 225 211 226 212 /*Grab max of all cpus: */ 227 #ifdef _HAVE_MPI_ 228 MPI_Allreduce((void*)&max,(void*)&allmax,1,MPI_INT,MPI_MAX,IssmComm::GetComm()); 213 ISSM_MPI_Allreduce((void*)&max,(void*)&allmax,1,ISSM_MPI_INT,ISSM_MPI_MAX,IssmComm::GetComm()); 229 214 max=allmax; 230 #endif231 215 232 216 return max; … … 242 226 243 227 /*Gather from all cpus: */ 244 #ifdef _HAVE_MPI_ 245 MPI_Allreduce ( (void*)&numdofs,(void*)&allnumdofs,1,MPI_INT,MPI_SUM,IssmComm::GetComm()); 246 #else 247 allnumdofs=numdofs; 248 #endif 228 ISSM_MPI_Allreduce ( (void*)&numdofs,(void*)&allnumdofs,1,ISSM_MPI_INT,ISSM_MPI_SUM,IssmComm::GetComm()); 249 229 return allnumdofs; 250 230 } … … 290 270 291 271 /*Gather from all cpus: */ 292 #ifdef _HAVE_MPI_ 293 MPI_Allreduce ( (void*)&numnodes,(void*)&allnumnodes,1,MPI_INT,MPI_SUM,IssmComm::GetComm()); 294 #else 295 allnumnodes=numnodes; 296 #endif 272 ISSM_MPI_Allreduce ( (void*)&numnodes,(void*)&allnumnodes,1,ISSM_MPI_INT,ISSM_MPI_SUM,IssmComm::GetComm()); 297 273 298 274 return allnumnodes; … … 320 296 } 321 297 322 #ifdef _HAVE_MPI_ 323 MPI_Reduce (&max_sid,&node_max_sid,1,MPI_INT,MPI_MAX,0,IssmComm::GetComm() ); 324 MPI_Bcast(&node_max_sid,1,MPI_INT,0,IssmComm::GetComm()); 298 ISSM_MPI_Reduce (&max_sid,&node_max_sid,1,ISSM_MPI_INT,ISSM_MPI_MAX,0,IssmComm::GetComm() ); 299 ISSM_MPI_Bcast(&node_max_sid,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 325 300 max_sid=node_max_sid; 326 #endif327 301 328 302 /*sid starts at 0*/ -
issm/trunk-jpl/src/c/classes/Patch.cpp
r14996 r15838 111 111 int node_numrows; 112 112 IssmDouble *total_values = NULL; 113 #ifdef _HAVE_MPI_ 114 MPI_Status status; 115 #endif 113 ISSM_MPI_Status status; 116 114 117 115 /*recover my_rank:*/ … … 120 118 121 119 /*First, figure out total number of rows combining all the cpus: */ 122 #ifdef _HAVE_MPI_ 123 MPI_Reduce(&this->numrows,&total_numrows,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm() ); 124 MPI_Bcast(&total_numrows,1,MPI_INT,0,IssmComm::GetComm()); 125 #else 126 total_numrows=this->numrows; 127 #endif 120 ISSM_MPI_Reduce(&this->numrows,&total_numrows,1,ISSM_MPI_INT,ISSM_MPI_SUM,0,IssmComm::GetComm() ); 121 ISSM_MPI_Bcast(&total_numrows,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 128 122 129 123 /*return if patch empty*/ … … 141 135 142 136 /*Now, ask other nodes to send their values: */ 143 #ifdef _HAVE_MPI_144 137 for(int i=1;i<num_procs;i++){ 145 138 if (my_rank==i){ 146 MPI_Send(&this->numrows,1,MPI_INT,0,1,IssmComm::GetComm());147 if (this->numrows) MPI_Send(this->values,this->numrows*this->numcols,MPI_DOUBLE,0,1,IssmComm::GetComm());139 ISSM_MPI_Send(&this->numrows,1,ISSM_MPI_INT,0,1,IssmComm::GetComm()); 140 if (this->numrows)ISSM_MPI_Send(this->values,this->numrows*this->numcols,ISSM_MPI_DOUBLE,0,1,IssmComm::GetComm()); 148 141 } 149 142 if (my_rank==0){ 150 MPI_Recv(&node_numrows,1,MPI_INT,i,1,IssmComm::GetComm(),&status);151 if (node_numrows) MPI_Recv(total_values+count,node_numrows*this->numcols,MPI_DOUBLE,i,1,IssmComm::GetComm(),&status);143 ISSM_MPI_Recv(&node_numrows,1,ISSM_MPI_INT,i,1,IssmComm::GetComm(),&status); 144 if (node_numrows)ISSM_MPI_Recv(total_values+count,node_numrows*this->numcols,ISSM_MPI_DOUBLE,i,1,IssmComm::GetComm(),&status); 152 145 count+=node_numrows*this->numcols; 153 146 } 154 147 } 155 #endif156 148 157 149 /*Now, node 0 has total_values, of size total_numrows*this->numcols. Update the fields in the patch, to reflect this new … … 162 154 this->values=total_values; 163 155 } 164 #ifdef _HAVE_MPI_165 156 else{ 166 157 this->numrows=0; 167 158 xDelete<IssmDouble>(this->values); 168 159 } 169 #endif170 160 }/*}}}*/ -
issm/trunk-jpl/src/c/classes/Profiler.cpp
r15104 -> r15838
@@ -73 +73 @@
  *in the execution: */
  if(!dontmpisync){
- #ifdef _HAVE_MPI_
- MPI_Barrier(IssmComm::GetComm());
- #endif
+ ISSM_MPI_Barrier(IssmComm::GetComm());
  }

  /*Capture time: */
  #ifdef _HAVE_MPI_
- t=MPI_Wtime();
+ t=ISSM_MPI_Wtime();
  #else
  t=(IssmPDouble)clock();
issm/trunk-jpl/src/c/classes/Vertices.cpp
r15012 r15838 64 64 * First: get number of pids for each cpu*/ 65 65 allpidcount=xNew<int>(num_procs); 66 #ifdef _HAVE_MPI_ 67 MPI_Gather(&pidcount,1,MPI_INT,allpidcount,1,MPI_INT,0,IssmComm::GetComm()); 68 MPI_Bcast(allpidcount,num_procs,MPI_INT,0,IssmComm::GetComm()); 69 #else 70 allpidcount[0]=pidcount; 71 #endif 66 ISSM_MPI_Gather(&pidcount,1,ISSM_MPI_INT,allpidcount,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 67 ISSM_MPI_Bcast(allpidcount,num_procs,ISSM_MPI_INT,0,IssmComm::GetComm()); 72 68 73 69 /* Every cpu should start its own pid count at the end of the pidcount from cpu-1*/ … … 92 88 vertex->ShowTruePids(truepids); 93 89 } 94 #ifdef _HAVE_MPI_ 95 MPI_Allreduce((void*)truepids,(void*)alltruepids,numberofobjects,MPI_INT,MPI_MAX,IssmComm::GetComm()); 96 #else 97 for(i=0;i<numberofobjects;i++)alltruepids[i]=truepids[i]; 98 #endif 90 ISSM_MPI_Allreduce((void*)truepids,(void*)alltruepids,numberofobjects,ISSM_MPI_INT,ISSM_MPI_MAX,IssmComm::GetComm()); 99 91 100 92 /* Now every cpu knows the true pids of everyone else that is not a clone*/ … … 135 127 * dealt with by another cpu. We take the minimum because we are going to manage dof assignment in increasing 136 128 * order of cpu rank. This is also why we initialized this array to num_procs.*/ 137 #ifdef _HAVE_MPI_ 138 MPI_Allreduce ( (void*)ranks,(void*)minranks,numberofobjects,MPI_INT,MPI_MIN,IssmComm::GetComm()); 139 #else 140 for(i=0;i<numberofobjects;i++)minranks[i]=ranks[i]; 141 #endif 129 ISSM_MPI_Allreduce ( (void*)ranks,(void*)minranks,numberofobjects,ISSM_MPI_INT,ISSM_MPI_MIN,IssmComm::GetComm()); 142 130 143 131 /*Now go through all objects, and use minranks to flag which objects are cloned: */ … … 167 155 } 168 156 169 #ifdef _HAVE_MPI_ 170 MPI_Reduce (&max_sid,&vertex_max_sid,1,MPI_INT,MPI_MAX,0,IssmComm::GetComm() ); 171 MPI_Bcast(&vertex_max_sid,1,MPI_INT,0,IssmComm::GetComm()); 157 ISSM_MPI_Reduce (&max_sid,&vertex_max_sid,1,ISSM_MPI_INT,ISSM_MPI_MAX,0,IssmComm::GetComm() ); 158 ISSM_MPI_Bcast(&vertex_max_sid,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 172 159 max_sid=vertex_max_sid; 173 #endif174 160 175 161 /*sid starts at 0*/ -
issm/trunk-jpl/src/c/modules/ConstraintsStatex/RiftConstraintsState.cpp
r15104 r15838 28 28 } 29 29 30 #ifdef _HAVE_MPI_ 31 MPI_Reduce (&found,&mpi_found,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm() ); 32 MPI_Bcast(&mpi_found,1,MPI_INT,0,IssmComm::GetComm()); 30 ISSM_MPI_Reduce (&found,&mpi_found,1,ISSM_MPI_INT,ISSM_MPI_SUM,0,IssmComm::GetComm() ); 31 ISSM_MPI_Bcast(&mpi_found,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 33 32 found=mpi_found; 34 #endif35 33 36 34 return found; … … 90 88 } 91 89 92 #ifdef _HAVE_MPI_ 93 MPI_Reduce (&num_unstable_constraints,&sum_num_unstable_constraints,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm() ); 94 MPI_Bcast(&sum_num_unstable_constraints,1,MPI_INT,0,IssmComm::GetComm()); 90 ISSM_MPI_Reduce (&num_unstable_constraints,&sum_num_unstable_constraints,1,ISSM_MPI_INT,ISSM_MPI_SUM,0,IssmComm::GetComm() ); 91 ISSM_MPI_Bcast(&sum_num_unstable_constraints,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 95 92 num_unstable_constraints=sum_num_unstable_constraints; 96 #endif97 93 98 94 /*Assign output pointers: */ … … 130 126 131 127 /*Is there just one found? that would mean we have frozen! : */ 132 #ifdef _HAVE_MPI_ 133 MPI_Reduce (&found,&mpi_found,1,MPI_INT,MPI_MAX,0,IssmComm::GetComm() ); 134 MPI_Bcast(&mpi_found,1,MPI_INT,0,IssmComm::GetComm()); 128 ISSM_MPI_Reduce (&found,&mpi_found,1,ISSM_MPI_INT,ISSM_MPI_MAX,0,IssmComm::GetComm() ); 129 ISSM_MPI_Bcast(&mpi_found,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 135 130 found=mpi_found; 136 #endif137 131 138 132 return found; … … 190 184 } 191 185 192 #ifdef _HAVE_MPI_ 193 MPI_Reduce (&found,&mpi_found,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm() ); 194 MPI_Bcast(&mpi_found,1,MPI_INT,0,IssmComm::GetComm()); 186 ISSM_MPI_Reduce (&found,&mpi_found,1,ISSM_MPI_INT,ISSM_MPI_SUM,0,IssmComm::GetComm() ); 187 ISSM_MPI_Bcast(&mpi_found,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 195 188 found=mpi_found; 196 #endif197 189 198 190 return found; … … 222 214 } 223 215 224 #ifdef _HAVE_MPI_ 225 MPI_Reduce (&found,&mpi_found,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm() ); 226 MPI_Bcast(&mpi_found,1,MPI_INT,0,IssmComm::GetComm()); 216 ISSM_MPI_Reduce (&found,&mpi_found,1,ISSM_MPI_INT,ISSM_MPI_SUM,0,IssmComm::GetComm() ); 217 ISSM_MPI_Bcast(&mpi_found,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 227 218 found=mpi_found; 228 #endif229 219 230 220 if (found){ … … 282 272 } 283 273 284 #ifdef _HAVE_MPI_ 285 MPI_Reduce (&num_unstable_constraints,&sum_num_unstable_constraints,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm() ); 286 MPI_Bcast(&sum_num_unstable_constraints,1,MPI_INT,0,IssmComm::GetComm()); 274 ISSM_MPI_Reduce (&num_unstable_constraints,&sum_num_unstable_constraints,1,ISSM_MPI_INT,ISSM_MPI_SUM,0,IssmComm::GetComm() ); 275 ISSM_MPI_Bcast(&sum_num_unstable_constraints,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 287 276 num_unstable_constraints=sum_num_unstable_constraints; 288 #endif289 277 290 278 /*Assign output pointers: */ … … 322 310 } 323 311 324 #ifdef _HAVE_MPI_ 325 MPI_Reduce (&max_penetration,&mpi_max_penetration,1,MPI_DOUBLE,MPI_MAX,0,IssmComm::GetComm() ); 326 MPI_Bcast(&mpi_max_penetration,1,MPI_DOUBLE,0,IssmComm::GetComm()); 312 ISSM_MPI_Reduce (&max_penetration,&mpi_max_penetration,1,ISSM_MPI_DOUBLE,ISSM_MPI_MAX,0,IssmComm::GetComm() ); 313 ISSM_MPI_Bcast(&mpi_max_penetration,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 327 314 max_penetration=mpi_max_penetration; 328 #endif329 315 330 316 /*feed max_penetration to inputs: */ … … 361 347 } 362 348 363 #ifdef _HAVE_MPI_ 364 MPI_Reduce (&num_unstable_constraints,&sum_num_unstable_constraints,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm() ); 365 
MPI_Bcast(&sum_num_unstable_constraints,1,MPI_INT,0,IssmComm::GetComm()); 349 ISSM_MPI_Reduce (&num_unstable_constraints,&sum_num_unstable_constraints,1,ISSM_MPI_INT,ISSM_MPI_SUM,0,IssmComm::GetComm() ); 350 ISSM_MPI_Bcast(&sum_num_unstable_constraints,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 366 351 num_unstable_constraints=sum_num_unstable_constraints; 367 #endif368 352 369 353 return num_unstable_constraints; -
issm/trunk-jpl/src/c/modules/ConstraintsStatex/ThermalConstraintsState.cpp
r13590 -> r15838
@@ -36 +36 @@
  }

- #ifdef _HAVE_MPI_
- MPI_Reduce (&num_unstable_constraints,&sum_num_unstable_constraints,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm() );
- MPI_Bcast(&sum_num_unstable_constraints,1,MPI_INT,0,IssmComm::GetComm());
+ ISSM_MPI_Reduce (&num_unstable_constraints,&sum_num_unstable_constraints,1,ISSM_MPI_INT,ISSM_MPI_SUM,0,IssmComm::GetComm() );
+ ISSM_MPI_Bcast(&sum_num_unstable_constraints,1,ISSM_MPI_INT,0,IssmComm::GetComm());
  num_unstable_constraints=sum_num_unstable_constraints;
- #endif

  /*Have we converged? : */
issm/trunk-jpl/src/c/modules/ConstraintsStatex/ThermalIsPresent.cpp
r13622 -> r15838
@@ -28 +28 @@
  }

- #ifdef _HAVE_MPI_
- MPI_Reduce (&found,&mpi_found,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm() );
- MPI_Bcast(&mpi_found,1,MPI_INT,0,IssmComm::GetComm());
+ ISSM_MPI_Reduce (&found,&mpi_found,1,ISSM_MPI_INT,ISSM_MPI_SUM,0,IssmComm::GetComm() );
+ ISSM_MPI_Bcast(&mpi_found,1,ISSM_MPI_INT,0,IssmComm::GetComm());
  found=mpi_found;
- #endif

  return found;
issm/trunk-jpl/src/c/modules/DragCoefficientAbsGradientx/DragCoefficientAbsGradientx.cpp
r15130 -> r15838
@@ -25 +25 @@

  /*Sum all J from all cpus of the cluster:*/
- #ifdef _HAVE_MPI_
- MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm() );
- MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
+ ISSM_MPI_Reduce (&J,&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,0,IssmComm::GetComm() );
+ ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
  J=J_sum;
- #endif

  /*Assign output pointers: */
issm/trunk-jpl/src/c/modules/GroundinglineMigrationx/GroundinglineMigrationx.cpp
r15104 -> r15838
@@ -160 +160 @@
  vec_nodes_on_floatingice->Assemble();

- #ifdef _HAVE_MPI_
- MPI_Allreduce(&local_nflipped,&nflipped,1,MPI_INT,MPI_SUM,IssmComm::GetComm());
+ ISSM_MPI_Allreduce(&local_nflipped,&nflipped,1,ISSM_MPI_INT,ISSM_MPI_SUM,IssmComm::GetComm());
  if(VerboseConvergence()) _printf0_("   Additional number of vertices allowed to unground: " << nflipped << "\n");
- #else
- nflipped=local_nflipped;
- #endif

  /*Avoid leaks: */
issm/trunk-jpl/src/c/modules/Krigingx/pKrigingx.cpp
r15557 r15838 37 37 38 38 /*Get some Options*/ 39 MPI_Barrier(MPI_COMM_WORLD); start=MPI_Wtime();39 ISSM_MPI_Barrier(ISSM_MPI_COMM_WORLD); start=ISSM_MPI_Wtime(); 40 40 options->Get(&radius,"searchradius",0.); 41 41 options->Get(&mindata,"mindata",1); … … 43 43 44 44 /*Process observation dataset*/ 45 MPI_Barrier(MPI_COMM_WORLD); start_init=MPI_Wtime();45 ISSM_MPI_Barrier(ISSM_MPI_COMM_WORLD); start_init=ISSM_MPI_Wtime(); 46 46 observations=new Observations(obs_list,obs_x,obs_y,obs_length,options); 47 MPI_Barrier(MPI_COMM_WORLD); finish_init=MPI_Wtime();47 ISSM_MPI_Barrier(ISSM_MPI_COMM_WORLD); finish_init=ISSM_MPI_Wtime(); 48 48 49 49 /*Allocate output*/ … … 54 54 options->Get(&output,"output",(char*)"prediction"); 55 55 56 MPI_Barrier(MPI_COMM_WORLD); start_core=MPI_Wtime( );56 ISSM_MPI_Barrier(ISSM_MPI_COMM_WORLD); start_core=ISSM_MPI_Wtime( ); 57 57 if(strcmp(output,"quadtree")==0){ 58 58 observations->QuadtreeColoring(predictions,x_interp,y_interp,n_interp); … … 73 73 _printf0_(" interpolation progress: "<<fixed<<setw(6)<<setprecision(4)<<100.<<"% \n"); 74 74 75 #ifdef _HAVE_MPI_76 75 double *sumpredictions =xNew<double>(n_interp); 77 76 double *sumerror =xNew<double>(n_interp); 78 MPI_Allreduce(predictions,sumpredictions,n_interp,MPI_DOUBLE,MPI_SUM,IssmComm::GetComm());79 MPI_Allreduce(error,sumerror,n_interp,MPI_DOUBLE,MPI_SUM,IssmComm::GetComm());77 ISSM_MPI_Allreduce(predictions,sumpredictions,n_interp,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,IssmComm::GetComm()); 78 ISSM_MPI_Allreduce(error,sumerror,n_interp,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,IssmComm::GetComm()); 80 79 xDelete<double>(error); error=sumerror; 81 80 xDelete<double>(predictions); predictions=sumpredictions; 82 #endif83 81 } 84 82 else if(strcmp(output,"v4")==0){ … … 91 89 _printf0_(" interpolation progress: "<<fixed<<setw(6)<<setprecision(4)<<100.<<"% \n"); 92 90 93 #ifdef _HAVE_MPI_94 91 double *sumpredictions =xNew<double>(n_interp); 95 MPI_Allreduce(predictions,sumpredictions,n_interp,MPI_DOUBLE,MPI_SUM,IssmComm::GetComm());92 ISSM_MPI_Allreduce(predictions,sumpredictions,n_interp,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,IssmComm::GetComm()); 96 93 xDelete<double>(predictions); predictions=sumpredictions; 97 #endif98 94 } 99 95 else if(strcmp(output,"nearestneighbor")==0){ … … 106 102 _printf0_(" interpolation progress: "<<fixed<<setw(6)<<setprecision(4)<<100.<<"% \n"); 107 103 108 #ifdef _HAVE_MPI_109 104 double *sumpredictions =xNew<double>(n_interp); 110 MPI_Allreduce(predictions,sumpredictions,n_interp,MPI_DOUBLE,MPI_SUM,IssmComm::GetComm());105 ISSM_MPI_Allreduce(predictions,sumpredictions,n_interp,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,IssmComm::GetComm()); 111 106 xDelete<double>(predictions); predictions=sumpredictions; 112 #endif113 107 } 114 108 else if(strcmp(output,"idw")==0){ … … 123 117 _printf0_(" interpolation progress: "<<fixed<<setw(6)<<setprecision(4)<<100.<<"% \n"); 124 118 125 #ifdef _HAVE_MPI_126 119 double *sumpredictions =xNew<double>(n_interp); 127 MPI_Allreduce(predictions,sumpredictions,n_interp,MPI_DOUBLE,MPI_SUM,IssmComm::GetComm());120 ISSM_MPI_Allreduce(predictions,sumpredictions,n_interp,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,IssmComm::GetComm()); 128 121 xDelete<double>(predictions); predictions=sumpredictions; 129 #endif130 122 } 131 123 else{ 132 124 _error_("output '" << output << "' not supported yet"); 133 125 } 134 MPI_Barrier(MPI_COMM_WORLD); finish_core=MPI_Wtime( );126 ISSM_MPI_Barrier(ISSM_MPI_COMM_WORLD); finish_core=ISSM_MPI_Wtime( ); 135 127 136 128 /*clean-up and Assign output pointer*/ … … 141 133 *perror = error; 
142 134 143 MPI_Barrier(MPI_COMM_WORLD); finish=MPI_Wtime( );135 ISSM_MPI_Barrier(ISSM_MPI_COMM_WORLD); finish=ISSM_MPI_Wtime( ); 144 136 _printf0_("\n " << setw(34) << left << "Observation fitering elapsed time: " << finish_init-start_init << " seconds \n\n"); 145 137 _printf0_(" " << setw(34) << left << "Kriging prediction elapsed time: " << finish_core-start_core << " seconds \n\n"); -
issm/trunk-jpl/src/c/modules/NodalValuex/NodalValuex.cpp
r15130 -> r15838
@@ -37 +37 @@

  /*Broadcast whether we found the element: */
- #ifdef _HAVE_MPI_
- MPI_Allreduce ( &found,&sumfound,1,MPI_INT,MPI_SUM,IssmComm::GetComm());
+ ISSM_MPI_Allreduce ( &found,&sumfound,1,ISSM_MPI_INT,ISSM_MPI_SUM,IssmComm::GetComm());
  if(!sumfound)_error_("could not find element with vertex with id" << index << " to compute nodal value " << EnumToStringx(natureofdataenum));
- #endif

  /*Broadcast and plug into response: */
- #ifdef _HAVE_MPI_
- MPI_Allreduce ( &cpu_found,&cpu_found,1,MPI_INT,MPI_MAX,IssmComm::GetComm());
- MPI_Bcast(&value,1,MPI_DOUBLE,cpu_found,IssmComm::GetComm());
- #else
- value=cpu_found;
- #endif
+ ISSM_MPI_Allreduce ( &cpu_found,&cpu_found,1,ISSM_MPI_INT,ISSM_MPI_MAX,IssmComm::GetComm());
+ ISSM_MPI_Bcast(&value,1,ISSM_MPI_DOUBLE,cpu_found,IssmComm::GetComm());

  *pnodalvalue=value;
issm/trunk-jpl/src/c/modules/ParseToolkitsOptionsx/ParseToolkitsOptionsx.cpp
r14999 r15838 95 95 96 96 /*Ok, broadcast to other cpus: */ 97 #ifdef _HAVE_MPI_ 98 MPI_Bcast(&numanalyses,1,MPI_INT,0,IssmComm::GetComm()); 97 ISSM_MPI_Bcast(&numanalyses,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 99 98 if(my_rank!=0){ 100 99 analyses=xNew<IssmDouble>(numanalyses); 101 100 strings=xNew<char*>(numanalyses); 102 101 } 103 MPI_Bcast(analyses,numanalyses,MPI_DOUBLE,0,IssmComm::GetComm()); 104 #endif 102 ISSM_MPI_Bcast(analyses,numanalyses,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 105 103 for(i=0;i<numanalyses;i++){ 106 104 char* string=strings[i]; … … 109 107 } 110 108 if(my_rank==0)stringlength=(strlen(string)+1)*sizeof(char); 111 #ifdef _HAVE_MPI_ 112 MPI_Bcast(&stringlength,1,MPI_INT,0,IssmComm::GetComm()); 109 ISSM_MPI_Bcast(&stringlength,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 113 110 if(my_rank!=0)string=xNew<char>(stringlength); 114 MPI_Bcast(string,stringlength,MPI_CHAR,0,IssmComm::GetComm());111 ISSM_MPI_Bcast(string,stringlength,ISSM_MPI_CHAR,0,IssmComm::GetComm()); 115 112 if(my_rank!=0)strings[i]=string; 116 #endif117 113 } 118 114 -
issm/trunk-jpl/src/c/modules/RheologyBbarAbsGradientx/RheologyBbarAbsGradientx.cpp
r15130 -> r15838
@@ -25 +25 @@

  /*Sum all J from all cpus of the cluster:*/
- #ifdef _HAVE_MPI_
- MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm() );
- MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
+ ISSM_MPI_Reduce (&J,&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,0,IssmComm::GetComm() );
+ ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
  J=J_sum;
- #endif

  /*Assign output pointers: */
issm/trunk-jpl/src/c/modules/SurfaceAbsVelMisfitx/SurfaceAbsVelMisfitx.cpp
r15130 -> r15838
@@ -25 +25 @@

  /*Sum all J from all cpus of the cluster:*/
- #ifdef _HAVE_MPI_
- MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm() );
- MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
+ ISSM_MPI_Reduce (&J,&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,0,IssmComm::GetComm() );
+ ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
  J=J_sum;
- #endif

  /*Assign output pointers: */
issm/trunk-jpl/src/c/modules/SurfaceAreax/SurfaceAreax.cpp
r14999 -> r15838
@@ -26 +26 @@

  /*Sum all J from all cpus of the cluster:*/
- #ifdef _HAVE_MPI_
- MPI_Reduce (&S,&S_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm() );
- MPI_Bcast(&S_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
+ ISSM_MPI_Reduce (&S,&S_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,0,IssmComm::GetComm() );
+ ISSM_MPI_Bcast(&S_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
  S=S_sum;
- #endif

  /*add surface area to element inputs:*/
issm/trunk-jpl/src/c/modules/SurfaceAverageVelMisfitx/SurfaceAverageVelMisfitx.cpp
r15130 -> r15838
@@ -29 +29 @@

  /*Sum all J from all cpus of the cluster:*/
- #ifdef _HAVE_MPI_
- MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm() );
- MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
+ ISSM_MPI_Reduce (&J,&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,0,IssmComm::GetComm() );
+ ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
  J=J_sum;
- #endif

  /*Assign output pointers: */
issm/trunk-jpl/src/c/modules/SurfaceLogVelMisfitx/SurfaceLogVelMisfitx.cpp
r15130 -> r15838
@@ -25 +25 @@

  /*Sum all J from all cpus of the cluster:*/
- #ifdef _HAVE_MPI_
- MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm() );
- MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
+ ISSM_MPI_Reduce (&J,&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,0,IssmComm::GetComm() );
+ ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
  J=J_sum;
- #endif

  /*Assign output pointers: */
issm/trunk-jpl/src/c/modules/SurfaceLogVxVyMisfitx/SurfaceLogVxVyMisfitx.cpp
r15130 -> r15838
@@ -25 +25 @@

  /*Sum all J from all cpus of the cluster:*/
- #ifdef _HAVE_MPI_
- MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm() );
- MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
+ ISSM_MPI_Reduce (&J,&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,0,IssmComm::GetComm() );
+ ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
  J=J_sum;
- #endif

  /*Assign output pointers: */
issm/trunk-jpl/src/c/modules/SurfaceRelVelMisfitx/SurfaceRelVelMisfitx.cpp
r15130 -> r15838
@@ -25 +25 @@

  /*Sum all J from all cpus of the cluster:*/
- #ifdef _HAVE_MPI_
- MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm() );
- MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
+ ISSM_MPI_Reduce (&J,&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,0,IssmComm::GetComm() );
+ ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
  J=J_sum;
- #endif

  /*Assign output pointers: */
issm/trunk-jpl/src/c/modules/ThicknessAbsMisfitx/ThicknessAbsMisfitx.cpp
r15130 -> r15838
@@ -25 +25 @@

  /*Sum all J from all cpus of the cluster:*/
- #ifdef _HAVE_MPI_
- MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm() );
- MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
+ ISSM_MPI_Reduce (&J,&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,0,IssmComm::GetComm() );
+ ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
  J=J_sum;
- #endif

  /*Assign output pointers: */
issm/trunk-jpl/src/c/modules/ThicknessAcrossGradientx/ThicknessAcrossGradientx.cpp
r15130 -> r15838
@@ -25 +25 @@

  /*Sum all J from all cpus of the cluster:*/
- #ifdef _HAVE_MPI_
- MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm() );
- MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm());
+ ISSM_MPI_Reduce (&J,&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,0,IssmComm::GetComm() );
+ ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm());
  J=J_sum;
- #endif

  /*Assign output pointers: */
issm/trunk-jpl/src/c/modules/ThicknessAlongGradientx/ThicknessAlongGradientx.cpp
r15130 r15838 25 25 26 26 /*Sum all J from all cpus of the cluster:*/ 27 #ifdef _HAVE_MPI_ 28 MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,IssmComm::GetComm() ); 29 MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,IssmComm::GetComm()); 27 ISSM_MPI_Reduce (&J,&J_sum,1,ISSM_MPI_DOUBLE,ISSM_MPI_SUM,0,IssmComm::GetComm() ); 28 ISSM_MPI_Bcast(&J_sum,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 30 29 J=J_sum; 31 #endif32 30 33 31 /*Assign output pointers: */ -
issm/trunk-jpl/src/c/shared/Numerics/types.h
r14915 r15838 17 17 #if ISSM_USE_64BIT_INDICES == 1 18 18 typedef long long IssmInt; 19 //#define MPIU_INT MPI_LONG_LONG_INT already define in petsc20 19 #else 21 20 typedef int IssmInt; 22 //#define MPIU_INT MPI_INT already defined in petsc23 21 #endif 24 22 -
issm/trunk-jpl/src/c/shared/io/Comm/Comm.h
r14897 r15838 6 6 #define _IO_COMM_H_ 7 7 8 #include ". /CommDef.h"8 #include "../../../toolkits/mpi/issmmpi.h" 9 9 #include "./IssmComm.h" 10 10 -
issm/trunk-jpl/src/c/shared/io/Comm/IssmComm.cpp
r14950 r15838 13 13 #include "../../Exceptions/exceptions.h" 14 14 15 void IssmComm::SetComm( COMM incomm){ /*{{{*/15 void IssmComm::SetComm(ISSM_MPI_COMM incomm){ /*{{{*/ 16 16 17 17 /*A comm is provided, we are running in parallel (this is not a module)*/ … … 28 28 29 29 }/*}}}*/ 30 COMM IssmComm::GetComm(){ /*{{{*/30 ISSM_MPI_COMM IssmComm::GetComm(){ /*{{{*/ 31 31 if(!parallel) _error_("Cannot return comm in serial mode"); 32 32 return comm; … … 39 39 if(!parallel) return my_rank; 40 40 41 #ifdef _HAVE_MPI_ 42 MPI_Comm_rank(comm,&my_rank); 43 #endif 41 ISSM_MPI_Comm_rank(comm,&my_rank); 44 42 45 43 return my_rank; … … 53 51 if(!parallel) return size; 54 52 55 #ifdef _HAVE_MPI_ 56 MPI_Comm_size(comm,&size); 57 #endif 53 ISSM_MPI_Comm_size(comm,&size); 58 54 59 55 return size; -
issm/trunk-jpl/src/c/shared/io/Comm/IssmComm.h
r14897 r15838 21 21 22 22 private: 23 static COMMcomm;23 static ISSM_MPI_Comm comm; 24 24 static bool parallel; 25 25 26 26 public: 27 static void SetComm( COMM incomm);27 static void SetComm(ISSM_MPI_COMM incomm); 28 28 static void SetComm(void); 29 static COMM GetComm(void);29 static ISSM_MPI_COMM GetComm(void); 30 30 static int GetRank(void); 31 31 static int GetSize(void); -
issm/trunk-jpl/src/c/shared/io/Disk/pfopen.cpp
r15559 r15838 39 39 for(int i=0;i<num_proc;i++){ 40 40 if(my_rank==i) fid = fopen(filename,format); 41 #ifdef _HAVE_MPI_ 42 MPI_Barrier(IssmComm::GetComm()); 43 #endif 41 ISSM_MPI_Barrier(IssmComm::GetComm()); 44 42 } 45 43 if(fid==NULL) _error_("could not open file " << filename << " for binary reading or writing"); -
issm/trunk-jpl/src/c/toolkits/issm/Bucket.h
r15104 r15838 13 13 /*}}}*/ 14 14 15 /*how many MPI_Isend requests does it take to transfer the contents of a bucket to another cpu?*/15 /*how many ISSM_MPI_Isend requests does it take to transfer the contents of a bucket to another cpu?*/ 16 16 #define MATRIXBUCKETSIZEOFREQUESTS 7 17 17 #define VECTORBUCKETSIZEOFREQUESTS 5 -
issm/trunk-jpl/src/c/toolkits/issm/IssmMpiDenseMat.h
r15104 r15838 153 153 } 154 154 } 155 MPI_Barrier(IssmComm::GetComm());155 ISSM_MPI_Barrier(IssmComm::GetComm()); 156 156 } 157 157 … … 187 187 /*some communicator info: */ 188 188 num_procs=IssmComm::GetSize(); 189 MPI_Comm comm=IssmComm::GetComm();189 ISSM_MPI_Comm comm=IssmComm::GetComm(); 190 190 191 191 /*First, make a vector of size M, which for each row between 0 and M-1, tells which cpu this row belongs to: */ … … 215 215 numvalues_fromcpu = xNew<int>(num_procs); 216 216 for(i=0;i<num_procs;i++){ 217 MPI_Scatter(numvalues_forcpu,1,MPI_INT,numvalues_fromcpu+i,1,MPI_INT,i,comm);217 ISSM_MPI_Scatter(numvalues_forcpu,1,ISSM_MPI_INT,numvalues_fromcpu+i,1,ISSM_MPI_INT,i,comm); 218 218 } 219 219 … … 241 241 /*Scatter values around: {{{*/ 242 242 /*Now, to scatter values across the cluster, we need sendcnts and displs. Our sendbufs have been built by BucketsBuildScatterBuffers, with a stride given 243 * by numvalues_forcpu. Get this ready to go before starting the scatter itslef. For reference, here is the MPI_Scatterv prototype:244 * int MPI_Scatterv( void *sendbuf, int *sendcnts, int *displs, MPI_Datatype sendtype, void *recvbuf, int recvcnt, MPI_Datatype recvtype, int root,MPI_Comm comm) :*/243 * by numvalues_forcpu. Get this ready to go before starting the scatter itslef. For reference, here is the ISSM_MPI_Scatterv prototype: 244 * int ISSM_MPI_Scatterv( void *sendbuf, int *sendcnts, int *displs, ISSM_MPI_Datatype sendtype, void *recvbuf, int recvcnt, ISSM_MPI_Datatype recvtype, int root, ISSM_MPI_Comm comm) :*/ 245 245 sendcnts=xNew<int>(num_procs); 246 246 displs=xNew<int>(num_procs); … … 253 253 254 254 for(i=0;i<num_procs;i++){ 255 MPI_Scatterv( row_indices_forcpu, sendcnts, displs, MPI_INT, row_indices_fromcpu[i], numvalues_fromcpu[i],MPI_INT, i, comm);256 MPI_Scatterv( col_indices_forcpu, sendcnts, displs, MPI_INT, col_indices_fromcpu[i], numvalues_fromcpu[i],MPI_INT, i, comm);257 MPI_Scatterv( values_forcpu, sendcnts, displs, MPI_DOUBLE, values_fromcpu[i], numvalues_fromcpu[i],MPI_DOUBLE, i, comm);258 MPI_Scatterv( modes_forcpu, sendcnts, displs, MPI_INT, modes_fromcpu[i], numvalues_fromcpu[i],MPI_INT, i, comm);255 ISSM_MPI_Scatterv( row_indices_forcpu, sendcnts, displs, ISSM_MPI_INT, row_indices_fromcpu[i], numvalues_fromcpu[i], ISSM_MPI_INT, i, comm); 256 ISSM_MPI_Scatterv( col_indices_forcpu, sendcnts, displs, ISSM_MPI_INT, col_indices_fromcpu[i], numvalues_fromcpu[i], ISSM_MPI_INT, i, comm); 257 ISSM_MPI_Scatterv( values_forcpu, sendcnts, displs, ISSM_MPI_DOUBLE, values_fromcpu[i], numvalues_fromcpu[i], ISSM_MPI_DOUBLE, i, comm); 258 ISSM_MPI_Scatterv( modes_forcpu, sendcnts, displs, ISSM_MPI_INT, modes_fromcpu[i], numvalues_fromcpu[i], ISSM_MPI_INT, i, comm); 259 259 } 260 260 /*}}}*/ … … 330 330 local_norm=max(local_norm,absolute); 331 331 } 332 MPI_Reduce(&local_norm, &norm, 1, MPI_DOUBLE,MPI_MAX, 0, IssmComm::GetComm());333 MPI_Bcast(&norm,1,MPI_DOUBLE,0,IssmComm::GetComm());332 ISSM_MPI_Reduce(&local_norm, &norm, 1, ISSM_MPI_DOUBLE, ISSM_MPI_MAX, 0, IssmComm::GetComm()); 333 ISSM_MPI_Bcast(&norm,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 334 334 return norm; 335 335 break; … … 341 341 } 342 342 } 343 MPI_Reduce(&local_norm, &norm, 1, MPI_DOUBLE,MPI_SUM, 0, IssmComm::GetComm());344 MPI_Bcast(&norm,1,MPI_DOUBLE,0,IssmComm::GetComm());343 ISSM_MPI_Reduce(&local_norm, &norm, 1, ISSM_MPI_DOUBLE, ISSM_MPI_SUM, 0, IssmComm::GetComm()); 344 ISSM_MPI_Bcast(&norm,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 345 345 return sqrt(norm); 346 346 break; -
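The IssmMpiDenseMat hunk above builds sendcnts/displs once and then lets every cpu take a turn as the Scatterv root. A standalone sketch of that counts-plus-offsets layout is below; it uses plain MPI calls (ISSM routes the same calls through the ISSM_MPI_* wrappers), and the fixed two-values-per-destination payload is an invented placeholder, not the real bucket contents.

/* Each rank stores the values destined for rank i back to back;  */
/* sendcnts[i] is how many, displs[i] is where that block starts. */
#include <mpi.h>
#include <stdlib.h>

int main(int argc,char** argv){
	MPI_Init(&argc,&argv);
	int num_procs,my_rank;
	MPI_Comm_size(MPI_COMM_WORLD,&num_procs);
	MPI_Comm_rank(MPI_COMM_WORLD,&my_rank);

	/* pretend this rank has 2 values for every destination rank */
	int*    sendcnts=(int*)   malloc(num_procs*sizeof(int));
	int*    displs  =(int*)   malloc(num_procs*sizeof(int));
	double* sendbuf =(double*)malloc(2*num_procs*sizeof(double));
	for(int i=0;i<2*num_procs;i++) sendbuf[i]=100*my_rank+i;

	sendcnts[0]=2; displs[0]=0;
	for(int i=1;i<num_procs;i++){ sendcnts[i]=2; displs[i]=displs[i-1]+sendcnts[i-1]; }

	/* every rank takes a turn as root, mirroring the loop in the hunk above */
	double recvbuf[2];
	for(int i=0;i<num_procs;i++){
		MPI_Scatterv(sendbuf,sendcnts,displs,MPI_DOUBLE,recvbuf,2,MPI_DOUBLE,i,MPI_COMM_WORLD);
	}

	free(sendcnts); free(displs); free(sendbuf);
	MPI_Finalize();
	return 0;
}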
issm/trunk-jpl/src/c/toolkits/issm/IssmMpiVec.h
r15365 r15838 21 21 #include "../../shared/MemOps/MemOps.h" 22 22 #include "../../shared/io/io.h" 23 #ifdef _HAVE_MPI_ 24 #include "../mpi/mpiincludes.h" 25 #endif 23 #include "../mpi/issmmpi.h" 26 24 #include <math.h> 27 25 … … 138 136 _printf_("\n"); 139 137 } 140 MPI_Barrier(IssmComm::GetComm());138 ISSM_MPI_Barrier(IssmComm::GetComm()); 141 139 } 142 140 } … … 171 169 /*some communicator info: */ 172 170 num_procs=IssmComm::GetSize(); 173 MPI_Comm comm=IssmComm::GetComm();171 ISSM_MPI_Comm comm=IssmComm::GetComm(); 174 172 175 173 /*First, make a vector of size M, which for each row between 0 and M-1, tells which cpu this row belongs to: */ … … 199 197 numvalues_fromcpu = xNew<int>(num_procs); 200 198 for(i=0;i<num_procs;i++){ 201 MPI_Scatter(numvalues_forcpu,1,MPI_INT,numvalues_fromcpu+i,1,MPI_INT,i,comm);199 ISSM_MPI_Scatter(numvalues_forcpu,1,ISSM_MPI_INT,numvalues_fromcpu+i,1,ISSM_MPI_INT,i,comm); 202 200 } 203 201 … … 222 220 /*Scatter values around: {{{*/ 223 221 /*Now, to scatter values across the cluster, we need sendcnts and displs. Our sendbufs have been built by BucketsBuildScatterBuffers, with a stride given 224 * by numvalues_forcpu. Get this ready to go before starting the scatter itslef. For reference, here is the MPI_Scatterv prototype:225 * int MPI_Scatterv( void *sendbuf, int *sendcnts, int *displs, MPI_Datatype sendtype, void *recvbuf, int recvcnt, MPI_Datatype recvtype, int root,MPI_Comm comm) :*/222 * by numvalues_forcpu. Get this ready to go before starting the scatter itslef. For reference, here is the ISSM_MPI_Scatterv prototype: 223 * int ISSM_MPI_Scatterv( void *sendbuf, int *sendcnts, int *displs, ISSM_MPI_Datatype sendtype, void *recvbuf, int recvcnt, ISSM_MPI_Datatype recvtype, int root, ISSM_MPI_Comm comm) :*/ 226 224 sendcnts=xNew<int>(num_procs); 227 225 displs=xNew<int>(num_procs); … … 234 232 235 233 for(i=0;i<num_procs;i++){ 236 MPI_Scatterv( row_indices_forcpu, sendcnts, displs, MPI_INT, row_indices_fromcpu[i], numvalues_fromcpu[i],MPI_INT, i, comm);237 MPI_Scatterv( values_forcpu, sendcnts, displs, MPI_DOUBLE, values_fromcpu[i], numvalues_fromcpu[i],MPI_DOUBLE, i, comm);238 MPI_Scatterv( modes_forcpu, sendcnts, displs, MPI_INT, modes_fromcpu[i], numvalues_fromcpu[i],MPI_INT, i, comm);234 ISSM_MPI_Scatterv( row_indices_forcpu, sendcnts, displs, ISSM_MPI_INT, row_indices_fromcpu[i], numvalues_fromcpu[i], ISSM_MPI_INT, i, comm); 235 ISSM_MPI_Scatterv( values_forcpu, sendcnts, displs, ISSM_MPI_DOUBLE, values_fromcpu[i], numvalues_fromcpu[i], ISSM_MPI_DOUBLE, i, comm); 236 ISSM_MPI_Scatterv( modes_forcpu, sendcnts, displs, ISSM_MPI_INT, modes_fromcpu[i], numvalues_fromcpu[i], ISSM_MPI_INT, i, comm); 239 237 } 240 238 /*}}}*/ … … 378 376 379 377 /*communicator info: */ 380 MPI_Comm comm;378 ISSM_MPI_Comm comm; 381 379 int num_procs; 382 380 383 /* MPI_Allgatherv info: */381 /*ISSM_MPI_Allgatherv info: */ 384 382 int lower_row,upper_row; 385 383 int* recvcounts=NULL; … … 399 397 400 398 /*recvcounts:*/ 401 MPI_Allgather(&this->m,1,MPI_INT,recvcounts,1,MPI_INT,comm);399 ISSM_MPI_Allgather(&this->m,1,ISSM_MPI_INT,recvcounts,1,ISSM_MPI_INT,comm); 402 400 403 401 /*get lower_row: */ … … 405 403 406 404 /*displs: */ 407 MPI_Allgather(&lower_row,1,MPI_INT,displs,1,MPI_INT,comm);405 ISSM_MPI_Allgather(&lower_row,1,ISSM_MPI_INT,displs,1,ISSM_MPI_INT,comm); 408 406 409 407 /*All gather:*/ 410 MPI_Allgatherv(this->vector, this->m, MPI_DOUBLE, buffer, recvcounts, displs,MPI_DOUBLE,comm);408 ISSM_MPI_Allgatherv(this->vector, this->m, ISSM_MPI_DOUBLE, buffer, 
recvcounts, displs, ISSM_MPI_DOUBLE,comm); 411 409 412 410 /*free ressources: */ … … 445 443 //local_norm=0; for(i=0;i<this->m;i++)local_norm=max(local_norm,fabs(this->vector[i])); 446 444 local_norm=0; for(i=0;i<this->m;i++)local_norm=max(local_norm,this->vector[i]); 447 MPI_Reduce(&local_norm, &norm, 1, MPI_DOUBLE,MPI_MAX, 0, IssmComm::GetComm());448 MPI_Bcast(&norm,1,MPI_DOUBLE,0,IssmComm::GetComm());445 ISSM_MPI_Reduce(&local_norm, &norm, 1, ISSM_MPI_DOUBLE, ISSM_MPI_MAX, 0, IssmComm::GetComm()); 446 ISSM_MPI_Bcast(&norm,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 449 447 return norm; 450 448 break; … … 452 450 local_norm=0; 453 451 for(i=0;i<this->m;i++)local_norm+=pow(this->vector[i],2); 454 MPI_Reduce(&local_norm, &norm, 1, MPI_DOUBLE,MPI_SUM, 0, IssmComm::GetComm());455 MPI_Bcast(&norm,1,MPI_DOUBLE,0,IssmComm::GetComm());452 ISSM_MPI_Reduce(&local_norm, &norm, 1, ISSM_MPI_DOUBLE, ISSM_MPI_SUM, 0, IssmComm::GetComm()); 453 ISSM_MPI_Bcast(&norm,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 456 454 return sqrt(norm); 457 455 break; … … 484 482 for(i=0;i<this->m;i++)local_dot+=this->vector[i]*input->vector[i]; 485 483 486 #ifdef _HAVE_MPI_ 487 /*MPI_SUM all the dots across the cluster: */ 488 MPI_Reduce(&local_dot, &dot, 1, MPI_DOUBLE, MPI_SUM, 0, IssmComm::GetComm()); 489 MPI_Bcast(&dot,1,MPI_DOUBLE,0,IssmComm::GetComm()); 490 #endif 484 /*ISSM_MPI_SUM all the dots across the cluster: */ 485 ISSM_MPI_Reduce(&local_dot, &dot, 1, ISSM_MPI_DOUBLE, ISSM_MPI_SUM, 0, IssmComm::GetComm()); 486 ISSM_MPI_Bcast(&dot,1,ISSM_MPI_DOUBLE,0,IssmComm::GetComm()); 491 487 492 488 return dot; -
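The IssmMpiVec hunk above serializes a distributed vector with ISSM_MPI_Allgather (sizes and offsets) followed by ISSM_MPI_Allgatherv (payload). The sketch below shows the same pattern with plain MPI calls; the toy local sizes are invented, and the displs are rebuilt with a prefix sum rather than gathered from lower_row as the ISSM code does.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc,char** argv){
	MPI_Init(&argc,&argv);
	int num_procs,my_rank;
	MPI_Comm_size(MPI_COMM_WORLD,&num_procs);
	MPI_Comm_rank(MPI_COMM_WORLD,&my_rank);

	/* every rank owns a small local chunk of the distributed vector */
	int local_m=my_rank+1;
	double* local=(double*)malloc(local_m*sizeof(double));
	for(int i=0;i<local_m;i++) local[i]=(double)my_rank;

	/* recvcounts: how many entries each rank contributes */
	int* recvcounts=(int*)malloc(num_procs*sizeof(int));
	MPI_Allgather(&local_m,1,MPI_INT,recvcounts,1,MPI_INT,MPI_COMM_WORLD);

	/* displs: offset of each rank's chunk in the serialized copy */
	int* displs=(int*)malloc(num_procs*sizeof(int));
	displs[0]=0;
	for(int i=1;i<num_procs;i++) displs[i]=displs[i-1]+recvcounts[i-1];
	int M=displs[num_procs-1]+recvcounts[num_procs-1];

	/* every rank ends up with the full serial copy */
	double* buffer=(double*)malloc(M*sizeof(double));
	MPI_Allgatherv(local,local_m,MPI_DOUBLE,buffer,recvcounts,displs,MPI_DOUBLE,MPI_COMM_WORLD);

	if(my_rank==0) printf("serialized %d entries\n",M);
	free(local); free(recvcounts); free(displs); free(buffer);
	MPI_Finalize();
	return 0;
}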
issm/trunk-jpl/src/c/toolkits/mpi/commops/DetermineGlobalSize.cpp
r14915 r15838 13 13 int global_size; 14 14 15 MPI_Reduce(&local_size, &global_size, 1, MPI_INT,MPI_SUM, 0, comm);16 MPI_Bcast(&global_size,1,MPI_INT,0,comm);15 ISSM_MPI_Reduce(&local_size, &global_size, 1, ISSM_MPI_INT, ISSM_MPI_SUM, 0, comm); 16 ISSM_MPI_Bcast(&global_size,1,ISSM_MPI_INT,0,comm); 17 17 18 18 return global_size; -
issm/trunk-jpl/src/c/toolkits/mpi/commops/DetermineLocalSize.cpp
r14965 r15838 22 22 23 23 /*recover my_rank*/ 24 MPI_Comm_rank(comm,&my_rank);25 MPI_Comm_size(comm,&num_procs);24 ISSM_MPI_Comm_rank(comm,&my_rank); 25 ISSM_MPI_Comm_size(comm,&num_procs); 26 26 27 27 /* TODO replace the following with -> -
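The TODO quoted in the hunk above suggests replacing the row-distribution loop with a closed-form split. A sketch of the arithmetic such a replacement would likely use follows (the function name is made up and this is not the committed DetermineLocalSize):

/* Near-even split: the first (global_size % num_procs) ranks get one extra row. */
int DetermineLocalSizeSketch(int global_size,int my_rank,int num_procs){
	int base = global_size/num_procs;
	int rest = global_size%num_procs;
	return base + (my_rank<rest ? 1 : 0);
}

For example, 10 rows over 4 cpus yields local sizes 3, 3, 2, 2.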
issm/trunk-jpl/src/c/toolkits/mpi/commops/DetermineRowRankFromLocalSize.cpp
r14915 r15838 21 21 int* RowRank=NULL; 22 22 23 MPI_Comm_rank(comm,&my_rank);24 MPI_Comm_size(comm,&num_procs);23 ISSM_MPI_Comm_rank(comm,&my_rank); 24 ISSM_MPI_Comm_size(comm,&num_procs); 25 25 26 26 /*allocate: */ … … 29 29 /*Gather all local_size values into alllocalsizes, for all cpus*/ 30 30 int* alllocalsizes=xNew<int>(num_procs); 31 MPI_Allgather(&localsize,1,MPI_INT,alllocalsizes,1,MPI_INT,comm);31 ISSM_MPI_Allgather(&localsize,1,ISSM_MPI_INT,alllocalsizes,1,ISSM_MPI_INT,comm); 32 32 33 33 /*From all localsizes, get lower row and upper row*/ -
issm/trunk-jpl/src/c/toolkits/mpi/commops/GetOwnershipBoundariesFromRange.cpp
r14950 r15838 21 21 22 22 /*recover my_rank and num_procs:*/ 23 MPI_Comm_size(comm,&num_procs);24 MPI_Comm_rank(comm,&my_rank);23 ISSM_MPI_Comm_size(comm,&num_procs); 24 ISSM_MPI_Comm_rank(comm,&my_rank); 25 25 26 26 /*output: */ … … 29 29 /*Gather all range values into allranges, for all nodes*/ 30 30 int* allranges=xNew<int>(num_procs); 31 MPI_Allgather(&range,1,MPI_INT,allranges,1,MPI_INT,comm);31 ISSM_MPI_Allgather(&range,1,ISSM_MPI_INT,allranges,1,ISSM_MPI_INT,comm); 32 32 33 33 /*From all ranges, get lower row and upper row*/ -
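GetOwnershipBoundariesFromRange above gathers every cpu's range and derives the calling cpu's row interval from it. A compact sketch of that Allgather-plus-prefix-sum logic, written against plain MPI (ISSM calls the ISSM_MPI_ wrappers; the function name below is illustrative):

#include <mpi.h>
#include <stdlib.h>

/* returns [*plower,*pupper) — the rows owned by the calling rank */
void OwnershipFromRangeSketch(int range,int* plower,int* pupper,MPI_Comm comm){
	int my_rank,num_procs;
	MPI_Comm_rank(comm,&my_rank);
	MPI_Comm_size(comm,&num_procs);

	int* allranges=(int*)malloc(num_procs*sizeof(int));
	MPI_Allgather(&range,1,MPI_INT,allranges,1,MPI_INT,comm);

	int lower=0;
	for(int i=0;i<my_rank;i++) lower+=allranges[i];   /* rows held by lower ranks */
	*plower=lower;
	*pupper=lower+range;

	free(allranges);
}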
issm/trunk-jpl/src/c/toolkits/mumps/MpiDenseMumpsSolve.cpp
r14950 r15838 14 14 #include "../../shared/Exceptions/exceptions.h" 15 15 #include "../../shared/io/Comm/Comm.h" 16 #include "../mpi/ patches/mpipatches.h"16 #include "../mpi/issmmpi.h" 17 17 18 18 /*Mumps header files: */ … … 28 28 /*Variables: {{{*/ 29 29 30 MPI_Comm comm;30 ISSM_MPI_Comm comm; 31 31 int my_rank; 32 32 int num_procs; … … 84 84 } 85 85 86 MPI_Reduce(&local_nnz,&nnz,1,MPI_INT,MPI_SUM,0,comm);87 MPI_Bcast(&nnz,1,MPI_INT,0,comm);86 ISSM_MPI_Reduce(&local_nnz,&nnz,1,ISSM_MPI_INT,ISSM_MPI_SUM,0,comm); 87 ISSM_MPI_Bcast(&nnz,1,ISSM_MPI_INT,0,comm); 88 88 id.nz=nnz; 89 89 id.nz_loc=local_nnz; … … 113 113 id.a_loc=a_loc; 114 114 115 /*Deal with right hand side. We need to MPI_Gather it onto cpu 0: */115 /*Deal with right hand side. We need to ISSM_MPI_Gather it onto cpu 0: */ 116 116 rhs=xNew<IssmPDouble>(pf_M); 117 117 … … 120 120 121 121 /*recvcounts:*/ 122 MPI_Allgather(&pf_m,1,MPI_INT,recvcounts,1,MPI_INT,comm);122 ISSM_MPI_Allgather(&pf_m,1,ISSM_MPI_INT,recvcounts,1,ISSM_MPI_INT,comm); 123 123 124 124 /*displs: */ 125 MPI_Allgather(&lower_row,1,MPI_INT,displs,1,MPI_INT,comm);125 ISSM_MPI_Allgather(&lower_row,1,ISSM_MPI_INT,displs,1,ISSM_MPI_INT,comm); 126 126 127 127 /*Gather:*/ 128 MPI_Gatherv(pf, pf_m, MPI_DOUBLE, rhs, recvcounts, displs,MPI_DOUBLE,0,comm);128 ISSM_MPI_Gatherv(pf, pf_m, ISSM_MPI_DOUBLE, rhs, recvcounts, displs, ISSM_MPI_DOUBLE,0,comm); 129 129 id.rhs=rhs; 130 130 id.nrhs=1; … … 137 137 /*}}}*/ 138 138 /*Now scatter from cpu 0 to all other cpus: {{{*/ 139 MPI_Scatterv( rhs, recvcounts, displs, MPI_DOUBLE, uf, uf_m,MPI_DOUBLE, 0, comm);139 ISSM_MPI_Scatterv( rhs, recvcounts, displs, ISSM_MPI_DOUBLE, uf, uf_m, ISSM_MPI_DOUBLE, 0, comm); 140 140 141 141 /*}}}*/ -
issm/trunk-jpl/src/c/toolkits/petsc/patches/MatMultPatch.cpp
r15513 r15838 14 14 #include <petscksp.h> 15 15 16 #include "../../mpi/ mpiincludes.h"16 #include "../../mpi/issmmpi.h" 17 17 #include "../../../shared/shared.h" 18 18 … … 54 54 55 55 /*recover num_procs:*/ 56 MPI_Comm_size(comm,&num_procs);56 ISSM_MPI_Comm_size(comm,&num_procs); 57 57 58 58 MatGetLocalSize(A,&local_m,&local_n);; … … 62 62 63 63 /*synchronize result: */ 64 MPI_Reduce (&result,&sumresult,1,MPI_INT,MPI_SUM,0,comm );65 MPI_Bcast(&sumresult,1,MPI_INT,0,comm);64 ISSM_MPI_Reduce (&result,&sumresult,1,ISSM_MPI_INT,ISSM_MPI_SUM,0,comm ); 65 ISSM_MPI_Bcast(&sumresult,1,ISSM_MPI_INT,0,comm); 66 66 if (sumresult!=num_procs){ 67 67 result=0; -
issm/trunk-jpl/src/c/toolkits/petsc/patches/MatToSerial.cpp
r13622 r15838 22 22 int range; 23 23 int M,N; //size of matrix 24 MPI_Status status;24 ISSM_MPI_Status status; 25 25 int* idxm=NULL; 26 26 int* idxn=NULL; … … 29 29 30 30 /*recover my_rank and num_procs:*/ 31 MPI_Comm_size(comm,&num_procs);32 MPI_Comm_rank(comm,&my_rank);31 ISSM_MPI_Comm_size(comm,&num_procs); 32 ISSM_MPI_Comm_rank(comm,&my_rank); 33 33 34 34 /*Output*/ … … 69 69 buffer[1]=lower_row; 70 70 buffer[2]=range; 71 MPI_Send(buffer,3,MPI_INT,0,1,comm);72 if (range) MPI_Send(local_matrix,N*range,MPI_DOUBLE,0,1,comm);71 ISSM_MPI_Send(buffer,3,ISSM_MPI_INT,0,1,comm); 72 if (range)ISSM_MPI_Send(local_matrix,N*range,ISSM_MPI_DOUBLE,0,1,comm); 73 73 } 74 74 if (my_rank==0){ 75 MPI_Recv(buffer,3,MPI_INT,i,1,comm,&status);76 if (buffer[2]) MPI_Recv(outmatrix+(buffer[1]*N),N*buffer[2],MPI_DOUBLE,i,1,comm,&status);75 ISSM_MPI_Recv(buffer,3,ISSM_MPI_INT,i,1,comm,&status); 76 if (buffer[2])ISSM_MPI_Recv(outmatrix+(buffer[1]*N),N*buffer[2],ISSM_MPI_DOUBLE,i,1,comm,&status); 77 77 } 78 78 } -
issm/trunk-jpl/src/c/toolkits/petsc/patches/NewMat.cpp
r15029 r15838 15 15 16 16 #include "./petscpatches.h" 17 18 17 #include "../../../shared/shared.h" 19 #include "../../mpi/ patches/mpipatches.h"18 #include "../../mpi/issmmpi.h" 20 19 21 20 /*NewMat(int M,int N){{{*/ -
issm/trunk-jpl/src/c/toolkits/petsc/patches/NewVec.cpp
r13760 r15838 15 15 16 16 #include "./petscpatches.h" 17 #include "../../mpi/ patches/mpipatches.h"17 #include "../../mpi/issmmpi.h" 18 18 19 19 Vec NewVec(int size,COMM comm,bool fromlocalsize){ -
issm/trunk-jpl/src/c/toolkits/petsc/patches/VecToMPISerial.cpp
r13622 r15838 19 19 20 20 /*Petsc*/ 21 MPI_Status status;21 ISSM_MPI_Status status; 22 22 PetscInt lower_row,upper_row; 23 23 int range; … … 35 35 36 36 /*recover my_rank and num_procs*/ 37 MPI_Comm_size(comm,&num_procs);38 MPI_Comm_rank(comm,&my_rank);37 ISSM_MPI_Comm_size(comm,&num_procs); 38 ISSM_MPI_Comm_rank(comm,&my_rank); 39 39 40 40 VecGetSize(vector,&vector_size); … … 69 69 buffer[1]=lower_row; 70 70 buffer[2]=range; 71 MPI_Send(buffer,3,MPI_INT,0,1,comm);72 if (range) MPI_Send(local_vector,range,MPI_DOUBLE,0,1,comm);71 ISSM_MPI_Send(buffer,3,ISSM_MPI_INT,0,1,comm); 72 if (range)ISSM_MPI_Send(local_vector,range,ISSM_MPI_DOUBLE,0,1,comm); 73 73 } 74 74 if (my_rank==0){ 75 MPI_Recv(buffer,3,MPI_INT,i,1,comm,&status);76 if (buffer[2]) MPI_Recv(gathered_vector+buffer[1],buffer[2],MPI_DOUBLE,i,1,comm,&status);75 ISSM_MPI_Recv(buffer,3,ISSM_MPI_INT,i,1,comm,&status); 76 if (buffer[2])ISSM_MPI_Recv(gathered_vector+buffer[1],buffer[2],ISSM_MPI_DOUBLE,i,1,comm,&status); 77 77 } 78 78 } … … 84 84 85 85 /*Now, broadcast gathered_vector from node 0 to other nodes: */ 86 MPI_Bcast(gathered_vector,vector_size,MPI_DOUBLE,0,comm);86 ISSM_MPI_Bcast(gathered_vector,vector_size,ISSM_MPI_DOUBLE,0,comm); 87 87 88 88 /*Assign output pointers: */ -
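VecToMPISerial above is the gather-to-rank-0-then-broadcast idiom: each cpu ships its (lower_row, range) chunk to cpu 0, which assembles the full vector and broadcasts it to everyone. A self-contained sketch of that communication skeleton (plain MPI shown; the real routine also queries the PETSc vector, e.g. VecGetSize in the hunk, which is omitted here):

#include <mpi.h>

/* local: this rank's chunk; gathered: preallocated buffer of size vector_size */
void VecToSerialSketch(double* local,int range,int lower_row,int vector_size,double* gathered,MPI_Comm comm){
	int my_rank,num_procs,buffer[3];
	MPI_Status status;
	MPI_Comm_rank(comm,&my_rank);
	MPI_Comm_size(comm,&num_procs);

	/* rank 0 copies its own chunk directly */
	if(my_rank==0) for(int i=0;i<range;i++) gathered[lower_row+i]=local[i];

	/* every other rank sends (rank,lower_row,range) then its values to rank 0 */
	for(int i=1;i<num_procs;i++){
		if(my_rank==i){
			buffer[0]=my_rank; buffer[1]=lower_row; buffer[2]=range;
			MPI_Send(buffer,3,MPI_INT,0,1,comm);
			if(range) MPI_Send(local,range,MPI_DOUBLE,0,1,comm);
		}
		if(my_rank==0){
			MPI_Recv(buffer,3,MPI_INT,i,1,comm,&status);
			if(buffer[2]) MPI_Recv(gathered+buffer[1],buffer[2],MPI_DOUBLE,i,1,comm,&status);
		}
	}

	/* hand the assembled copy back to every rank */
	MPI_Bcast(gathered,vector_size,MPI_DOUBLE,0,comm);
}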
issm/trunk-jpl/src/c/toolkits/plapack/patches/PlapackInvertMatrix.cpp
r14917 r15838 12 12 #include "../../scalapack/FortranMapping.h" 13 13 14 void PlapackInvertMatrixLocalCleanup(PLA_Obj* pa,PLA_Template* ptempl,double** parrayA,int** pidxnA, MPI_Comm* pcomm_2d);14 void PlapackInvertMatrixLocalCleanup(PLA_Obj* pa,PLA_Template* ptempl,double** parrayA,int** pidxnA,ISSM_MPI_Comm* pcomm_2d); 15 15 16 16 int PlapackInvertMatrix(Mat* A,Mat* inv_A,int status,int con,COMM comm){ … … 27 27 28 28 /*Plapack: */ 29 MPI_Datatype datatype;30 MPI_Comm comm_2d;29 ISSM_MPI_Datatype datatype; 30 ISSM_MPI_Comm comm_2d; 31 31 PLA_Obj a=NULL; 32 32 PLA_Template templ; … … 85 85 86 86 /* Set the datatype */ 87 datatype = MPI_DOUBLE;87 datatype = ISSM_MPI_DOUBLE; 88 88 89 89 /* Copy A into a*/ … … 123 123 /*Finalize PLAPACK*/ 124 124 PLA_Finalize(); 125 MPI_Comm_free(&comm_2d);125 ISSM_MPI_Comm_free(&comm_2d); 126 126 } -
issm/trunk-jpl/src/c/toolkits/toolkits.h
r15053 r15838 16 16 #endif 17 17 18 #ifdef _HAVE_MPI_ 19 #include "./mpi/mpiincludes.h" 20 #endif 18 #include "./mpi/issmmpi.h" 21 19 22 20 #ifdef _HAVE_METIS_