Changeset 12016
- Timestamp: 04/16/12 19:19:56
- Location: issm/trunk-jpl/src/c
- Files: 30 edited
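Every C/C++ diff below applies the same edit: the _PARALLEL_ preprocessor guard, and its serial #else fallback where one existed, is deleted so the MPI-based code path is always compiled. A minimal sketch of the pattern, assuming an MPI-aware build and using hypothetical names (local_count, global_count, SumAcrossCpus) rather than any specific routine from the files below:

   #include <mpi.h>

   /* Before r12016 the two MPI calls sat inside
    *    #ifdef _PARALLEL_ ... #else global_count=local_count; #endif
    * After r12016 they are compiled unconditionally. */
   static int SumAcrossCpus(int local_count){
      int global_count=0;
      MPI_Reduce(&local_count,&global_count,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD);
      MPI_Bcast(&global_count,1,MPI_INT,0,MPI_COMM_WORLD);
      return global_count;
   }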
issm/trunk-jpl/src/c/Container/Constraints.cpp
(r10522 → r12016) In the routine that figures out the total number of constraints across all CPUs (no clones), the #ifdef _PARALLEL_ guard around the MPI_Reduce/MPI_Bcast pair is removed, together with the serial fallback numberofconstraints=localconstraints; the two MPI calls are now compiled unconditionally.
issm/trunk-jpl/src/c/Container/Elements.cpp
(r11695 → r12016) Three #ifdef _PARALLEL_/#endif guards are removed: around the MPI_Reduce/MPI_Bcast pairs that synchronize the patch sizes (maximum numvertices and numnodes) across the cluster, around the if(io_gather)patch->Gather() call that gathers results onto CPU 0, and around the MPI_Allreduce that sums the local element counts in the routine returning the total number of elements; the serial fallback numberofelements=this->Size() is dropped.
issm/trunk-jpl/src/c/Container/Loads.cpp
(r10522 → r12016) Same change as Constraints.cpp: the #ifdef _PARALLEL_ guard and the serial fallback numberofloads=localloads are removed around the MPI_Reduce/MPI_Bcast pair that totals the loads across all CPUs.
issm/trunk-jpl/src/c/Container/Nodes.cpp
(r11298 → r12016) Two guards are removed: around the MPI_Allreduce that grabs the maximum over all CPUs, and around the MPI_Reduce/MPI_Bcast pair that computes the maximum sid (max_sid/node_max_sid); the latter three MPI lines are also re-indented.
issm/trunk-jpl/src/c/Container/Vertices.cpp
(r11298 → r12016) The #ifdef _PARALLEL_ guard around the MPI_Reduce/MPI_Bcast pair that computes the maximum vertex sid (vertex_max_sid) is removed.
issm/trunk-jpl/src/c/Makefile.am
(r12015 → r12016) The parallel-only source lists are folded into the regular ones: the *_psources variables for the transient, steadystate, prognostic, thermal, control, hydrology, diagnostic, balancethickness and slope groups are deleted, and their entries — the solution drivers (transient_core.cpp, steadystate_core.cpp, prognostic_core.cpp, thermal_core.cpp, enthalpy_core.cpp, control_core.cpp, controltao_core.cpp, controlrestart.cpp, hydrology_core.cpp, hydrology_core_step.cpp, diagnostic_core.cpp, balancethickness_core.cpp, surfaceslope_core.cpp, bedslope_core.cpp) plus steadystateconvergence.cpp, solver_thermal_nonlinear.cpp and solver_stokescoupling_nonlinear.cpp — are appended to the matching *_sources variables.
issm/trunk-jpl/src/c/matlab/io/PrintfFunction.cpp
(r12013 → r12016) The #if defined(_PARALLEL_) guard is removed so that node 0 always prints the buffer with printf; the serial mexPrintf(buffer) branch is deleted.
issm/trunk-jpl/src/c/modules/ConstraintsStatex/RiftConstraintsState.cpp
(r9761 → r12016) All eight #ifdef _PARALLEL_/#endif guards are removed around the MPI_Reduce/MPI_Bcast pairs that combine results across CPUs: the MPI_SUM and MPI_MAX reductions of found, the MPI_SUM reductions of num_unstable_constraints, and the MPI_MAX reduction of max_penetration (MPI_DOUBLE).
issm/trunk-jpl/src/c/modules/ConstraintsStatex/ThermalConstraintsState.cpp
(r9883 → r12016) The guard around the MPI_Reduce/MPI_Bcast pair that sums num_unstable_constraints across CPUs is removed.
issm/trunk-jpl/src/c/modules/ConstraintsStatex/ThermalIsPresent.cpp
(r9883 → r12016) The guard around the MPI_Reduce/MPI_Bcast pair that sums found across CPUs is removed.
issm/trunk-jpl/src/c/modules/Dakotax/Dakotax.cpp
(r12011 → r12016) The #ifdef _PARALLEL_ guards around the rank branching are removed: the if(my_rank==0){...} block that instantiates the Dakota parallel library, runs the selected strategy and then calls SpawnCore(NULL,0,NULL,NULL,0,femmodel,-1) to warn the other CPUs that the Dakota iterator is done (by setting the counter to -1), and the matching else{...} block for the non-zero ranks, are now always compiled.
issm/trunk-jpl/src/c/modules/InputConvergencex/InputConvergencex.cpp
(r9761 → r12016) The guard around the MPI_Allreduce that gathers the converged status (summing num_notconverged over all CPUs) is removed.
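As a side note, the single MPI_Allreduce used here (and in MassFluxx.cpp just below) leaves the combined value on every rank, which is what the MPI_Reduce-then-MPI_Bcast pairs in the container classes and Max*/Min* modules of this changeset achieve in two calls. A sketch, again with a hypothetical counter rather than ISSM's variables:

   #include <mpi.h>

   /* Sketch only: equivalent to reducing onto rank 0 and broadcasting back. */
   static int SumViaAllreduce(int local_count){
      int total=0;
      MPI_Allreduce(&local_count,&total,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
      return total;
   }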
issm/trunk-jpl/src/c/modules/MassFluxx/MassFluxx.cpp
(r8263 → r12016) The guard around the MPI_Allreduce that sums mass_flux (MPI_DOUBLE) over all CPUs is removed.
issm/trunk-jpl/src/c/modules/MaxAbsVxx/MaxAbsVxx.cpp
(r5870 → r12016) The guard around the MPI_Reduce/MPI_Bcast pair that takes the cluster-wide maximum of maxabsvx is removed.
issm/trunk-jpl/src/c/modules/MaxAbsVyx/MaxAbsVyx.cpp
(r5871 → r12016) Same guard removal for the cluster-wide maximum of maxabsvy.
issm/trunk-jpl/src/c/modules/MaxAbsVzx/MaxAbsVzx.cpp
(r5414 → r12016) Same guard removal for the cluster-wide maximum of maxabsvz.
issm/trunk-jpl/src/c/modules/MaxVelx/MaxVelx.cpp
(r5414 → r12016) Same guard removal for the cluster-wide maximum of maxvel.
issm/trunk-jpl/src/c/modules/MaxVxx/MaxVxx.cpp
(r5414 → r12016) Same guard removal for the cluster-wide maximum of maxvx.
issm/trunk-jpl/src/c/modules/MaxVyx/MaxVyx.cpp
(r5414 → r12016) Same guard removal for the cluster-wide maximum of maxvy.
issm/trunk-jpl/src/c/modules/MaxVzx/MaxVzx.cpp
(r5414 → r12016) Same guard removal for the cluster-wide maximum of maxvz.
issm/trunk-jpl/src/c/modules/MinVelx/MinVelx.cpp
(r5414 → r12016) Same guard removal for the cluster-wide minimum of minvel.
issm/trunk-jpl/src/c/modules/MinVxx/MinVxx.cpp
(r5414 → r12016) Same guard removal for the cluster-wide minimum of minvx.
issm/trunk-jpl/src/c/modules/MinVyx/MinVyx.cpp
(r5414 → r12016) Same guard removal for the cluster-wide minimum of minvy.
issm/trunk-jpl/src/c/modules/MinVzx/MinVzx.cpp
(r5414 → r12016) Same guard removal for the cluster-wide minimum of minvz.
issm/trunk-jpl/src/c/modules/ModelProcessorx/ElementsAndVerticesPartitioning.cpp
(r9733 → r12016) The Metis-based element partitioning is no longer guarded by #ifdef _PARALLEL_, and the serial branch that filled epart with zeros so that all elements belonged to CPU 0 (epart=(int*)xcalloc(numberofelements,sizeof(int))) is deleted.
issm/trunk-jpl/src/c/modules/ParsePetscOptionsx/ParsePetscOptionsx.cpp
(r11945 → r12016) The guard around the block that broadcasts the parsed PETSc options to the other CPUs is removed: the MPI_Bcast of numanalyses from rank 0, followed by the per-analysis broadcast of the option strings (non-zero ranks store the received string).
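The broadcast loop that this change un-guards has to ship variable-length option strings from rank 0 to the other ranks; the usual MPI idiom for that, sketched below with hypothetical names rather than ISSM's actual helper code, is to broadcast the length first, let the receivers allocate, then broadcast the characters:

   #include <mpi.h>
   #include <stdlib.h>
   #include <string.h>

   /* Sketch only: after the call every rank holds its own copy of the string
    * that rank 0 passed in. */
   static char* BcastString(char* str,int my_rank){
      int len=0;
      if(my_rank==0) len=(int)strlen(str)+1;      /* include trailing '\0' */
      MPI_Bcast(&len,1,MPI_INT,0,MPI_COMM_WORLD);
      if(my_rank!=0) str=(char*)malloc(len);      /* receivers allocate */
      MPI_Bcast(str,len,MPI_CHAR,0,MPI_COMM_WORLD);
      return str;
   }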
issm/trunk-jpl/src/c/modules/Solverx/SolverxPetsc.cpp
(r11679 → r12016) The PETSc-3 block after KSPSetFromOptions(ksp) is now selected with #if _PETSC_MAJOR_==3 instead of #if defined(_SERIAL_) && _PETSC_MAJOR_==3, and the separate #if defined(_PARALLEL_) && _PETSC_MAJOR_==3 guard (with its matching #endif) around the Stokes branch (solver_type==StokesSolverEnum) is removed, merging it into the same block.
issm/trunk-jpl/src/c/modules/TimeAdaptx/TimeAdaptx.cpp
(r6130 → r12016) The guard around the MPI_Reduce/MPI_Bcast pair that takes the cluster-wide minimum time step (min_dt) is removed.
issm/trunk-jpl/src/c/objects/FemModel.cpp
(r11695 → r12016) The #ifdef _PARALLEL_/#endif pair that wrapped the body of the FemModel constructor (FemModel::FemModel(char* inputfilename, char* outputfilename, const int in_solution_type, const int* analyses, const int nummodels)), including the line that adds the output file name to the parameters as a StringParam(OutputfilenameEnum,outputfilename), is removed; the constructor body is now always compiled.
issm/trunk-jpl/src/c/solutions/issm.cpp
(r11983 → r12016) The compile-time check after MODULEBOOT() that raised _error_(" parallel executable was compiled without support of parallel libraries!") when _PARALLEL_ was not defined is removed; execution proceeds directly to initializing the environments (Petsc, MPI, etc.).