Changeset 12102
- Timestamp: 04/20/12 19:07:47
- Location: issm/trunk-jpl
- Files: 1 added, 2 deleted, 55 edited
Legend:
- removed lines are prefixed with "-"
- added lines are prefixed with "+"
- unchanged context lines are indented, and "..." marks unchanged lines omitted between hunks
issm/trunk-jpl/configs/config-macosx64-larour-nopetsc.sh
(r11931 → r12102)
  #!/bin/sh

- #petsc 3.2
- #mpich 1.4
+ pythonversion=2.7

  ./configure \
  --prefix=$ISSM_TIER \
- --with-mpi-include=$ISSM_TIER/externalpackages/mpich2/install/include \
- --with-mpi-lib=" $ISSM_TIER/externalpackages/mpich2/install/lib/libpmpich.a $ISSM_TIER/externalpackages/mpich2/install/lib/libmpich.a $ISSM_TIER/externalpackages/mpich2/install/lib/libmpl.a " \
  --with-matlab-dir=$MATLAB_DIR \
+ --with-python-dir=$ISSM_TIER/externalpackages/python/install \
+ --with-python-version=$pythonversion \
+ --with-python-numpy-dir=$ISSM_TIER/externalpackages/python/install/Python.framework/Versions/$pythonversion/lib/python$pythonversion/site-packages/numpy/core/include/numpy \
  --with-triangle-dir=$ISSM_TIER/externalpackages/triangle/install \
- --with-metis-dir=$ISSM_TIER/externalpackages/metis/install \
- --with-chaco-dir=$ISSM_TIER/externalpackages/chaco/install \
- --with-fortran-lib="/usr/local/gfortran/lib/libgfortran.a" \
  --with-math-lib="/usr/lib/libm.dylib" \
- --with-blas-lapack-dir=$ISSM_TIER/externalpackages/petsc/install/lib \
  --with-graphics-lib="/usr/X11/lib/libX11.dylib" \
  --with-cxxoptflags=" -fno-common -no-cpp-precomp -fexceptions -arch x86_64 -mmacosx-version-min=10.5 -O3 -DNDEBUG -w " \
- --with-numthreads=8 \
  --with-gsl-dir=$ISSM_TIER/externalpackages/gsl/install
- # --with-petsc-dir=$ISSM_TIER/externalpackages/petsc/install \
- # --with-petsc-arch=$ISSM_ARCH \
- # --with-plapack-lib="-L$ISSM_TIER/externalpackages/petsc/install/ -lPLAPACK" \
- # --with-plapack-include="-I$ISSM_TIER/externalpackages/petsc/install/include/ " \
- # --with-blacs-dir=$ISSM_TIER/externalpackages/petsc/install/ \
- # --with-scalapack-dir=$ISSM_TIER/externalpackages/petsc/install/ \
- # --with-mumps-dir=$ISSM_TIER/externalpackages/petsc/install/
- # --with-dakota-dir=$ISSM_TIER/externalpackages/dakota/install \
issm/trunk-jpl/externalpackages/nose/install-macosx64.sh
(r11841 → r12102)
  #/bin/bash
- #Install Python 3 nose module
+ #Install Python nose module

- rm -rf py3k
+ pythonversion=2
+ rm -rf src

- svn checkout http://python-nose.googlecode.com/svn/branches/py3k
- cd py3k
+ if [[ $pythonversion == "3" ]];then

- python3.2 ./setup.py build
- python3.2 ./setup.py install
+ svn checkout http://python-nose.googlecode.com/svn/branches/py3k
+ mv py3k src
+ cd src
+ python ./setup.py build
+ python ./setup.py install
+ fi
+
+ if [[ $pythonversion == "2" ]];then
+ tar -zxvf nose-1.1.2.tar.gz
+ mv nose-1.1.2 src
+ rm -rf nose-1.1.2
+ cd src
+ python ./setup.py build
+ python ./setup.py install
+ fi
issm/trunk-jpl/externalpackages/scipy/install-macosx64.sh
(r11944 → r12102)
  #install numpy
  cd numpy
- python3.2 setup.py build
- python3.2 setup.py install
+ python setup.py build
+ python setup.py install
...
  #install scipy
  cd scipy
- python3.2 setup.py build
- python3.2 setup.py install
+ python setup.py build
+ python setup.py install

  elif [[ $install == "3" ]];then
issm/trunk-jpl/m4/issm_options.m4
(r12092 → r12102)
  HAVE_METIS=no
  fi
-
- if test $HAVE_METIS = no; then
- AC_MSG_ERROR([--with-metis-dir missing. Metis is needed to run ISSM]);
- else
- AC_MSG_RESULT($HAVE_METIS)
- fi
+ AM_CONDITIONAL([METIS], [test x$HAVE_METIS = xyes])
  dnl }}}
  dnl mpi{{{
...
  fi
  fi
+ AM_CONDITIONAL([MPI], [test x$HAVE_MPI = xyes])
  AC_MSG_RESULT($HAVE_MPI)
  dnl }}}
...
  fi

+ dnl check that if we have MPI, we have metis
+ if test "$HAVE_METIS" = "yes" && test "$HAVE_MPI" = "no" ; then
+ AC_MSG_ERROR([need mpi if using the metis partitioner!]);
+ fi
+
  AC_MSG_RESULT(done)
  dnl }}}
issm/trunk-jpl/src/Makefile.am
(r12075 → r12102)
- EXTRA_DIST = perl pro py
- SUBDIRS = c modules m
+ EXTRA_DIST = perl pro
+ SUBDIRS = c modules m py
issm/trunk-jpl/src/c/Container/Constraints.cpp
(r12016 → r12102)
  /*figure out total number of constraints combining all the cpus (no clones here)*/
- MPI_Reduce(&localconstraints,&numberofconstraints,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD );
- MPI_Bcast(&numberofconstraints,1,MPI_INT,0,MPI_COMM_WORLD);
+ #ifdef _HAVE_MPI_
+ MPI_Reduce(&localconstraints,&numberofconstraints,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD );
+ MPI_Bcast(&numberofconstraints,1,MPI_INT,0,MPI_COMM_WORLD);
+ #else
+ numberofconstraints=localconstraints;
+ #endif

  return numberofconstraints;
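The Constraints.cpp hunk above is the idiom that repeats through most of the C++ files in this changeset: every MPI reduction/broadcast pair is fenced with #ifdef _HAVE_MPI_, and the serial branch simply reuses the local value. A minimal self-contained sketch of that pattern, assuming the _HAVE_MPI_ macro from the ISSM build; the function name TotalCount and its arguments are illustrative only, not taken from the changeset:

#ifdef _HAVE_MPI_
#include <mpi.h>
#endif

/*Combine a per-rank count into a global count; in a serial build the local count is already global.*/
int TotalCount(int localcount){
	int totalcount;
	#ifdef _HAVE_MPI_
	/*Sum local counts onto rank 0, then broadcast the result to every rank:*/
	MPI_Reduce(&localcount,&totalcount,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD);
	MPI_Bcast(&totalcount,1,MPI_INT,0,MPI_COMM_WORLD);
	#else
	totalcount=localcount;
	#endif
	return totalcount;
}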
issm/trunk-jpl/src/c/Container/Elements.cpp
(r12016 → r12102)
  /*Synchronize across cluster, so as to not end up with different sizes for each patch on each cpu: */
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&numvertices,&max_numvertices,1,MPI_INT,MPI_MAX,0,MPI_COMM_WORLD );
  MPI_Bcast(&max_numvertices,1,MPI_INT,0,MPI_COMM_WORLD);
...
  MPI_Bcast(&max_numnodes,1,MPI_INT,0,MPI_COMM_WORLD);
  numnodes=max_numnodes;
+ #endif

  /*Ok, initialize Patch object: */
...
  /*Get rank of first cpu that has results*/
+ #ifdef _HAVE_MPI_
  if(this->Size()) rank=my_rank;
  else rank=num_procs;
  MPI_Allreduce (&rank,&minrank,1,MPI_INT,MPI_MIN,MPI_COMM_WORLD);
+ #else
+ minrank=my_rank;
+ #endif

  /*see what the first element of this partition has in stock (this is common to all partitions)*/
...
  element->ListResultsInfo(&resultsenums,&resultssizes,&resultstimes,&resultssteps,&numberofresults);
  }
+ #ifdef _HAVE_MPI_
  MPI_Bcast(&numberofresults,1,MPI_DOUBLE,minrank,MPI_COMM_WORLD);
+ #endif

  /*Get out if there is no results. Otherwise broadcast info*/
  if(!numberofresults) return;
+ #ifdef _HAVE_MPI_
  if(my_rank!=minrank){
  resultsenums=(int*)xmalloc(numberofresults*sizeof(int));
...
  MPI_Bcast(resultstimes,numberofresults,MPI_DOUBLE,minrank,MPI_COMM_WORLD);
  MPI_Bcast(resultssteps,numberofresults,MPI_INT,minrank,MPI_COMM_WORLD);
+ #endif

  /*Loop over all results and get nodal vector*/
...
  local_nelem=this->Size();
+ #ifdef _HAVE_MPI_
  MPI_Allreduce ( (void*)&local_nelem,(void*)&numberofelements,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
+ #else
+ numberofelements=local_nelem;
+ #endif

  return numberofelements;
issm/trunk-jpl/src/c/Container/Loads.cpp
(r12016 → r12102)
  /*figure out total number of loads combining all the cpus (no clones here)*/
+ #ifdef _HAVE_MPI_
  MPI_Reduce(&localloads,&numberofloads,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&numberofloads,1,MPI_INT,0,MPI_COMM_WORLD);
+ #else
+ numberofloads=localloads;
+ #endif

  return numberofloads;
issm/trunk-jpl/src/c/Container/Nodes.cpp
(r12016 → r12102)
  * 0. This means the dofs between all the cpus are not unique. We now offset the dofs of eache
  * cpus by the total last dofs of the previus cpu, starting from 0.
- * First: bet number of dofs for each cpu*/
+ * First: get number of dofs for each cpu*/
  alldofcount=(int*)xmalloc(num_procs*sizeof(int));
+ #ifdef _HAVE_MPI_
  MPI_Gather(&dofcount,1,MPI_INT,alldofcount,1,MPI_INT,0,MPI_COMM_WORLD);
  MPI_Bcast(alldofcount,num_procs,MPI_INT,0,MPI_COMM_WORLD);
+ #else
+ alldofcount[0]=dofcount;
+ #endif

  /* Every cpu should start its own dof count at the end of the dofcount from cpu-1*/
...
  }
  }
+ #ifdef _HAVE_MPI_
  MPI_Allreduce ( (void*)truedofs,(void*)alltruedofs,numnodes*maxdofspernode,MPI_INT,MPI_MAX,MPI_COMM_WORLD);
+ #else
+ alltruedofs[0]=truedofs[0];
+ #endif

  /* Now every cpu knows the true dofs of everyone else that is not a clone*/
...
  * dealt with by another cpu. We take the minimum because we are going to manage dof assignment in increasing
  * order of cpu rank. This is also why we initialized this array to num_procs.*/
+ #ifdef _HAVE_MPI_
  MPI_Allreduce ( (void*)ranks,(void*)minranks,numnodes,MPI_INT,MPI_MIN,MPI_COMM_WORLD);
+ #else
+ for(i=0;i<numnodes;i++)minranks[i]=ranks[i];
+ #endif

  /*Now go through all objects, and use minranks to flag which objects are cloned: */
...
  /*Grab max of all cpus: */
+ #ifdef _HAVE_MPI_
  MPI_Allreduce ( (void*)&max,(void*)&allmax,1,MPI_INT,MPI_MAX,MPI_COMM_WORLD);
- max=allmax;
+ #endif

  return max;
...
  /*Gather from all cpus: */
+ #ifdef _HAVE_MPI_
  MPI_Allreduce ( (void*)&numdofs,(void*)&allnumdofs,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
+ #else
+ allnumdofs=numdofs;
+ #endif
  return allnumdofs;
  }
...
  /*Gather from all cpus: */
+ #ifdef _HAVE_MPI_
  MPI_Allreduce ( (void*)&numnodes,(void*)&allnumnodes,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
+ #else
+ allnumnodes=numnodes;
+ #endif

  return allnumnodes;
...
  }

+ #ifdef _HAVE_MPI_
  MPI_Reduce (&max_sid,&node_max_sid,1,MPI_INT,MPI_MAX,0,MPI_COMM_WORLD );
  MPI_Bcast(&node_max_sid,1,MPI_INT,0,MPI_COMM_WORLD);
  max_sid=node_max_sid;
+ #endif

  if(max_sid==1){
issm/trunk-jpl/src/c/Container/Vertices.cpp
(r12016 → r12102)
  * First: bet number of dofs for each cpu*/
  alldofcount=(int*)xmalloc(num_procs*sizeof(int));
+ #ifdef _HAVE_MPI_
  MPI_Gather(&dofcount,1,MPI_INT,alldofcount,1,MPI_INT,0,MPI_COMM_WORLD);
  MPI_Bcast(alldofcount,num_procs,MPI_INT,0,MPI_COMM_WORLD);
+ #else
+ alldofcount[0]=dofcount;
+ #endif

  /* Every cpu should start its own dof count at the end of the dofcount from cpu-1*/
...
  vertex->ShowTrueDofs(truedofs);
  }
+ #ifdef _HAVE_MPI_
  MPI_Allreduce((void*)truedofs,(void*)alltruedofs,numberofobjects*numberofdofsperobject,MPI_INT,MPI_MAX,MPI_COMM_WORLD);
+ #else
+ for(i=0;i<numberofobjects*numberofdofsperobject;i++)alltruedofs[i]=truedofs[i];
+ #endif

  /* Now every cpu knows the true dofs of everyone else that is not a clone*/
...
  * dealt with by another cpu. We take the minimum because we are going to manage dof assignment in increasing
  * order of cpu rank. This is also why we initialized this array to num_procs.*/
+ #ifdef _HAVE_MPI_
  MPI_Allreduce ( (void*)ranks,(void*)minranks,numberofobjects,MPI_INT,MPI_MIN,MPI_COMM_WORLD);
+ #else
+ for(i=0;i<numberofobjects;i++)minranks[i]=ranks[i];
+ #endif

  /*Now go through all objects, and use minranks to flag which objects are cloned: */
...
  }

+ #ifdef _HAVE_MPI_
  MPI_Reduce (&max_sid,&vertex_max_sid,1,MPI_INT,MPI_MAX,0,MPI_COMM_WORLD );
  MPI_Bcast(&vertex_max_sid,1,MPI_INT,0,MPI_COMM_WORLD);
  max_sid=vertex_max_sid;
+ #endif

  /*sid starts at 0*/
issm/trunk-jpl/src/c/Makefile.am
(r12093 → r12102)
  ./shared/Wrapper/ModuleBoot.cpp\
  ./shared/Wrapper/ModuleEnd.cpp\
- ./toolkits/mpi/mpiincludes.h\
- ./toolkits/mpi/patches/mpipatches.h\
- ./toolkits/mpi/patches/DetermineLocalSize.cpp\
- ./toolkits/mpi/patches/MPI_Upperrow.cpp\
- ./toolkits/mpi/patches/MPI_Lowerrow.cpp\
- ./toolkits/mpi/patches/MPI_Boundariesfromrange.cpp\
  ./toolkits/metis/metisincludes.h\
  ./toolkits/issm/issmtoolkit.h\
...
  ./toolkits/issm/SeqMat.h\
  ./toolkits/issm/SeqMat.cpp\
- ./toolkits/metis/patches/metispatches.h\
- ./toolkits/metis/patches/METIS_PartMeshNodalPatch.cpp\
  ./toolkits/triangle/triangleincludes.h\
  ./toolkitsenums.h\
...
  ./toolkits/petsc/patches/SerialToVec.cpp\
  ./toolkits/petsc/patches/VecFree.cpp\
+ ./toolkits/petsc/patches/PetscMatrixToDoubleMatrix.cpp\
+ ./toolkits/petsc/patches/PetscVectorToDoubleVector.cpp\
  ./toolkits/petsc/patches/VecDuplicatePatch.cpp\
  ./toolkits/petsc/patches/KSPFree.cpp\
...
  #}}}
+ #Mpi sources {{{1
+ mpi_sources= ./toolkits/mpi/mpiincludes.h\
+ ./toolkits/mpi/patches/mpipatches.h\
+ ./toolkits/mpi/patches/DetermineLocalSize.cpp\
+ ./toolkits/mpi/patches/MPI_Upperrow.cpp\
+ ./toolkits/mpi/patches/MPI_Lowerrow.cpp\
+ ./toolkits/mpi/patches/MPI_Boundariesfromrange.cpp
+ #}}}
+ #Metis sources {{{1
+ metis_sources= ./toolkits/metis/patches/metispatches.h\
+ ./toolkits/metis/patches/METIS_PartMeshNodalPatch.cpp
+ #}}}
  #Python sources {{{1
  python_sources= ./python/io/pythonio.h\
...
  ./matlab/io/MatlabVectorToDoubleVector.cpp\
  ./matlab/io/MatlabMatrixToDoubleMatrix.cpp\
- ./matlab/io/MatlabMatrixToPetscMatrix.cpp\
- ./matlab/io/MatlabVectorToPetscVector.cpp\
- ./matlab/io/PetscMatrixToDoubleMatrix.cpp\
- ./matlab/io/PetscVectorToDoubleVector.cpp\
  ./matlab/io/MatlabMatrixToSeqMat.cpp\
  ./matlab/io/MatlabVectorToSeqVec.cpp
+ #}}}
+ #Matlab and Petsc sources {{{1
+ matlabpetsc_sources= ./matlab/io/MatlabMatrixToPetscMatrix.cpp\
+ ./matlab/io/MatlabVectorToPetscVector.cpp
  #}}}
  #Modules sources{{{1
...
  issm_sources += $(threed_sources)
  endif
+ if MPI
+ issm_sources += $(mpi_sources)
+ endif
+ if METIS
+ issm_sources += $(metis_sources)
+ endif
+ if PETSC
+ if MATLAB
+ issm_sources += $(matlabpetsc_sources)
+ endif
+ endif
  #}}}
  #Library flags and sources {{{1
issm/trunk-jpl/src/c/matlab/io/WriteMatlabData.cpp
(r12046 → r12102)
  #else
  matrix_ptr=matrix->matrix->ToSerial();
- matrix->matrix->GetSize(&rows,cols);
+ matrix->matrix->GetSize(&rows,&cols);
  #endif
issm/trunk-jpl/src/c/matlab/io/matlabio.h
(r12051 → r12102)
  int MatlabNArrayToNArray(bool** pmatrix,int* pmatrix_numel,int* pmatrix_ndims,int** pmatrix_size,const mxArray* mxmatrix);
  int MatlabNArrayToNArray(char** pmatrix,int* pmatrix_numel,int* pmatrix_ndims,int** pmatrix_size,const mxArray* mxmatrix);
- int MatlabMatrixToPetscMatrix(Mat* matrix,int* prows,int* pcols, const mxArray* mxmatrix);
-
- /*Matlab to Petsc routines: */
- int MatlabVectorToPetscVector(Vec* pvector,int* pvector_rows,const mxArray* mxvector);
- void PetscMatrixToDoubleMatrix(double** pmatrix, int* prows, int* pcols,Mat matrix);
- void PetscVectorToDoubleVector(double** pvector, int* prows, Vec vector);

  /*Matlab to SeqMat routines: */
...
  SeqVec* MatlabVectorToSeqVec(const mxArray* dataref);

+ /*Matlab to Petsc routines: */
+ #ifdef _HAVE_PETSC_
+ int MatlabMatrixToPetscMatrix(Mat* matrix,int* prows,int* pcols, const mxArray* mxmatrix);
+ int MatlabVectorToPetscVector(Vec* pvector,int* pvector_rows,const mxArray* mxvector);
+ #endif
+
  #endif /* _IO_H_ */
issm/trunk-jpl/src/c/modules/ConstraintsStatex/RiftConstraintsState.cpp
(r12016 → r12102)
  }

+ #ifdef _HAVE_MPI_
  MPI_Reduce (&found,&mpi_found,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&mpi_found,1,MPI_INT,0,MPI_COMM_WORLD);
  found=mpi_found;
+ #endif

  return found;
...
  }

+ #ifdef _HAVE_MPI_
  MPI_Reduce (&num_unstable_constraints,&sum_num_unstable_constraints,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&sum_num_unstable_constraints,1,MPI_INT,0,MPI_COMM_WORLD);
  num_unstable_constraints=sum_num_unstable_constraints;
+ #endif

  /*Assign output pointers: */
...
  /*Is there just one found? that would mean we have frozen! : */
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&found,&mpi_found,1,MPI_INT,MPI_MAX,0,MPI_COMM_WORLD );
  MPI_Bcast(&mpi_found,1,MPI_INT,0,MPI_COMM_WORLD);
  found=mpi_found;
+ #endif

  return found;
...
  }

+ #ifdef _HAVE_MPI_
  MPI_Reduce (&found,&mpi_found,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&mpi_found,1,MPI_INT,0,MPI_COMM_WORLD);
  found=mpi_found;
+ #endif

  return found;
...
  }

+ #ifdef _HAVE_MPI_
  MPI_Reduce (&found,&mpi_found,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&mpi_found,1,MPI_INT,0,MPI_COMM_WORLD);
  found=mpi_found;
+ #endif

  if (found){
...
  }

+ #ifdef _HAVE_MPI_
  MPI_Reduce (&num_unstable_constraints,&sum_num_unstable_constraints,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&sum_num_unstable_constraints,1,MPI_INT,0,MPI_COMM_WORLD);
  num_unstable_constraints=sum_num_unstable_constraints;
+ #endif

  /*Assign output pointers: */
...
  }

+ #ifdef _HAVE_MPI_
  MPI_Reduce (&max_penetration,&mpi_max_penetration,1,MPI_DOUBLE,MPI_MAX,0,MPI_COMM_WORLD );
  MPI_Bcast(&mpi_max_penetration,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  max_penetration=mpi_max_penetration;
+ #endif

  /*feed max_penetration to inputs: */
...
  }

+ #ifdef _HAVE_MPI_
  MPI_Reduce (&num_unstable_constraints,&sum_num_unstable_constraints,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&sum_num_unstable_constraints,1,MPI_INT,0,MPI_COMM_WORLD);
  num_unstable_constraints=sum_num_unstable_constraints;
+ #endif

  return num_unstable_constraints;
issm/trunk-jpl/src/c/modules/ConstraintsStatex/ThermalConstraintsState.cpp
(r12016 → r12102)
  }

+ #ifdef _HAVE_MPI_
  MPI_Reduce (&num_unstable_constraints,&sum_num_unstable_constraints,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&sum_num_unstable_constraints,1,MPI_INT,0,MPI_COMM_WORLD);
  num_unstable_constraints=sum_num_unstable_constraints;
+ #endif

  /*Have we converged? : */
issm/trunk-jpl/src/c/modules/ConstraintsStatex/ThermalIsPresent.cpp
(r12016 → r12102)
  }

+ #ifdef _HAVE_MPI_
  MPI_Reduce (&found,&mpi_found,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&mpi_found,1,MPI_INT,0,MPI_COMM_WORLD);
  found=mpi_found;
+ #endif

  return found;
issm/trunk-jpl/src/c/modules/DragCoefficientAbsGradientx/DragCoefficientAbsGradientx.cpp
(r8608 → r12102)
  /*Sum all J from all cpus of the cluster:*/
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  J=J_sum;
+ #endif

  /*Assign output pointers: */
issm/trunk-jpl/src/c/modules/ElementResponsex/ElementResponsex.cpp
(r10703 → r12102)
  /*Broadcast whether we found the element: */
+ #ifdef _HAVE_MPI_
  MPI_Allreduce ( &found,&sumfound,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
  if(!sumfound)_error_("%s%i%s","could not find material with id",index," to compute ElementResponse");
+ #endif

  /*Ok, we found the element, compute responseocity: */
...
  /*Broadcast and plug into response: */
+ #ifdef _HAVE_MPI_
  MPI_Allreduce ( &cpu_found,&cpu_found,1,MPI_INT,MPI_MAX,MPI_COMM_WORLD);
  MPI_Bcast(&response,1,MPI_DOUBLE,cpu_found,MPI_COMM_WORLD);
+ #endif

  *presponse=response;
issm/trunk-jpl/src/c/modules/GroundinglineMigrationx/GroundinglineMigrationx.cpp
(r11695 → r12102)
  vec_nodes_on_floatingice->Assemble();

+ #ifdef _HAVE_MPI_
  MPI_Allreduce(&local_nflipped,&nflipped,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
  _printf_(VerboseConvergence(),"   Additional number of vertices allowed to unground: %i\n",nflipped);
+ #else
+ nflipped=local_nflipped;
+ #endif

  /*Avoid leaks: */
issm/trunk-jpl/src/c/modules/IceVolumex/IceVolumex.cpp
(r9880 → r12102)
  local_ice_volume+=element->IceVolume();
  }
+ #ifdef _HAVE_MPI_
  MPI_Reduce(&local_ice_volume,&total_ice_volume,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&total_ice_volume,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
+ #else
+ total_ice_volume=local_ice_volume;
+ #endif

  /*Assign output pointers: */
issm/trunk-jpl/src/c/modules/InputConvergencex/InputConvergencex.cpp
(r12016 → r12102)
  /*In parallel, we need to gather the converged status: */
+ #ifdef _HAVE_MPI_
  MPI_Allreduce ( (void*)&num_notconverged,(void*)&total_notconverged,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
  num_notconverged=total_notconverged;
+ #endif
  _printf_(VerboseConvergence(),"   #elements above convergence criterion = %i\n",num_notconverged);
issm/trunk-jpl/src/c/modules/MassFluxx/MassFluxx.cpp
(r12016 → r12102)
  }

+ #ifdef _HAVE_MPI_
  MPI_Allreduce ( (void*)&mass_flux,(void*)&all_mass_flux,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
  mass_flux=all_mass_flux;
+ #endif

  /*Free ressources:*/
issm/trunk-jpl/src/c/modules/MaxAbsVxx/MaxAbsVxx.cpp
(r12016 → r12102)
  /*Figure out maximum across the cluster: */
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&maxabsvx,&node_maxabsvx,1,MPI_DOUBLE,MPI_MAX,0,MPI_COMM_WORLD );
  MPI_Bcast(&node_maxabsvx,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  maxabsvx=node_maxabsvx;
+ #endif

  /*Assign output pointers:*/
issm/trunk-jpl/src/c/modules/MaxAbsVyx/MaxAbsVyx.cpp
(r12016 → r12102)
  /*Figure out maximum across the cluster: */
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&maxabsvy,&node_maxabsvy,1,MPI_DOUBLE,MPI_MAX,0,MPI_COMM_WORLD );
  MPI_Bcast(&node_maxabsvy,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  maxabsvy=node_maxabsvy;
+ #endif

  /*Assign output pointers:*/
issm/trunk-jpl/src/c/modules/MaxAbsVzx/MaxAbsVzx.cpp
(r12016 → r12102)
  /*Figure out minimum across the cluster: */
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&maxabsvz,&node_maxabsvz,1,MPI_DOUBLE,MPI_MAX,0,MPI_COMM_WORLD );
  MPI_Bcast(&node_maxabsvz,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  maxabsvz=node_maxabsvz;
+ #endif

  /*Assign output pointers:*/
issm/trunk-jpl/src/c/modules/MaxVelx/MaxVelx.cpp
(r12016 → r12102)
  /*Figure out maximum across the cluster: */
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&maxvel,&node_maxvel,1,MPI_DOUBLE,MPI_MAX,0,MPI_COMM_WORLD );
  MPI_Bcast(&node_maxvel,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  maxvel=node_maxvel;
+ #endif

  /*Assign output pointers:*/
issm/trunk-jpl/src/c/modules/MaxVxx/MaxVxx.cpp
(r12016 → r12102)
  /*Figure out minimum across the cluster: */
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&maxvx,&node_maxvx,1,MPI_DOUBLE,MPI_MAX,0,MPI_COMM_WORLD );
  MPI_Bcast(&node_maxvx,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  maxvx=node_maxvx;
+ #endif

  /*Assign output pointers:*/
issm/trunk-jpl/src/c/modules/MaxVyx/MaxVyx.cpp
(r12016 → r12102)
  /*Figure out minimum across the cluster: */
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&maxvy,&node_maxvy,1,MPI_DOUBLE,MPI_MAX,0,MPI_COMM_WORLD );
  MPI_Bcast(&node_maxvy,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  maxvy=node_maxvy;
+ #endif

  /*Assign output pointers:*/
issm/trunk-jpl/src/c/modules/MaxVzx/MaxVzx.cpp
(r12016 → r12102)
  /*Figure out minimum across the cluster: */
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&maxvz,&node_maxvz,1,MPI_DOUBLE,MPI_MAX,0,MPI_COMM_WORLD );
  MPI_Bcast(&node_maxvz,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  maxvz=node_maxvz;
+ #endif

  /*Assign output pointers:*/
issm/trunk-jpl/src/c/modules/MeshPartitionx/MeshPartitionx.cpp
(r10000 → r12102)
  /*Partition using Metis:*/
  if (num_procs>1){
+ #ifdef _HAVE_METIS_
  METIS_PartMeshNodalPatch(&numberofelements,&numberofnodes, index, &etype, &numflag, &num_procs, &edgecut, epart, npart);
+ #endif
  }
  else if (num_procs==1){
...
  /*Partition using Metis:*/
  if (num_procs>1){
+ #ifdef _HAVE_METIS_
  METIS_PartMeshNodalPatch(&numberofelements2d,&numberofnodes2d, index2d, &etype2d, &numflag, &num_procs, &edgecut, epart2d, npart2d);
+ #endif
  }
  else if (num_procs==1){
issm/trunk-jpl/src/c/modules/MinVelx/MinVelx.cpp
(r12016 → r12102)
  /*Figure out minimum across the cluster: */
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&minvel,&node_minvel,1,MPI_DOUBLE,MPI_MIN,0,MPI_COMM_WORLD );
  MPI_Bcast(&node_minvel,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  minvel=node_minvel;
+ #endif

  /*Assign output pointers:*/
issm/trunk-jpl/src/c/modules/MinVxx/MinVxx.cpp
(r12016 → r12102)
  /*Figure out minimum across the cluster: */
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&minvx,&node_minvx,1,MPI_DOUBLE,MPI_MIN,0,MPI_COMM_WORLD );
  MPI_Bcast(&node_minvx,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  minvx=node_minvx;
+ #endif

  /*Assign output pointers:*/
issm/trunk-jpl/src/c/modules/MinVyx/MinVyx.cpp
(r12016 → r12102)
  /*Figure out minimum across the cluster: */
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&minvy,&node_minvy,1,MPI_DOUBLE,MPI_MIN,0,MPI_COMM_WORLD );
  MPI_Bcast(&node_minvy,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  minvy=node_minvy;
+ #endif

  /*Assign output pointers:*/
issm/trunk-jpl/src/c/modules/MinVzx/MinVzx.cpp
(r12016 → r12102)
  /*Figure out minimum across the cluster: */
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&minvz,&node_minvz,1,MPI_DOUBLE,MPI_MIN,0,MPI_COMM_WORLD );
  MPI_Bcast(&node_minvz,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  minvz=node_minvz;
+ #endif

  /*Assign output pointers:*/
issm/trunk-jpl/src/c/modules/NodalValuex/NodalValuex.cpp
(r9206 → r12102)
  /*Broadcast whether we found the element: */
+ #ifdef _HAVE_MPI_
  MPI_Allreduce ( &found,&sumfound,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
  if(!sumfound)_error_("%s%i%s%s","could not find element with vertex with id",index," to compute nodal value ",EnumToStringx(natureofdataenum));
+ #endif

  /*Broadcast and plug into response: */
+ #ifdef _HAVE_MPI_
  MPI_Allreduce ( &cpu_found,&cpu_found,1,MPI_INT,MPI_MAX,MPI_COMM_WORLD);
  MPI_Bcast(&value,1,MPI_DOUBLE,cpu_found,MPI_COMM_WORLD);
+ #else
+ value=cpu_found;
+ #endif

  *pnodalvalue=value;
issm/trunk-jpl/src/c/modules/ParsePetscOptionsx/ParsePetscOptionsx.cpp
(r12016 → r12102)
  /*Ok, broadcast to other cpus: */
+ #ifdef _HAVE_MPI_
  MPI_Bcast(&numanalyses,1,MPI_INT,0,MPI_COMM_WORLD);
  if(my_rank!=0){
...
  }
  MPI_Bcast(analyses,numanalyses,MPI_DOUBLE,0,MPI_COMM_WORLD);
+ #endif
  for(i=0;i<numanalyses;i++){
  char* string=strings[i];
...
  }
  if(my_rank==0)stringlength=(strlen(string)+1)*sizeof(char);
+ #ifdef _HAVE_MPI_
  MPI_Bcast(&stringlength,1,MPI_INT,0,MPI_COMM_WORLD);
  if(my_rank!=0)string=(char*)xmalloc(stringlength);
  MPI_Bcast(string,stringlength,MPI_CHAR,0,MPI_COMM_WORLD);
  if(my_rank!=0)strings[i]=string;
+ #endif
  }
issm/trunk-jpl/src/c/modules/RheologyBbarAbsGradientx/RheologyBbarAbsGradientx.cpp
(r8608 → r12102)
  /*Sum all J from all cpus of the cluster:*/
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  J=J_sum;
+ #endif

  /*Assign output pointers: */
issm/trunk-jpl/src/c/modules/SurfaceAbsVelMisfitx/SurfaceAbsVelMisfitx.cpp
(r8607 → r12102)
  /*Sum all J from all cpus of the cluster:*/
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  J=J_sum;
+ #endif

  /*Assign output pointers: */
issm/trunk-jpl/src/c/modules/SurfaceAreax/SurfaceAreax.cpp
(r5414 → r12102)
  /*Sum all J from all cpus of the cluster:*/
- MPI_Reduce (&S,&S_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD );
+ #ifdef _HAVE_MPI_
+ MPI_Reduce (&S,&S_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&S_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  S=S_sum;
+ #endif

  /*add surface area to element inputs:*/
issm/trunk-jpl/src/c/modules/SurfaceAverageVelMisfitx/SurfaceAverageVelMisfitx.cpp
(r8607 → r12102)
  /*Sum all J from all cpus of the cluster:*/
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  J=J_sum;
+ #endif

  /*Assign output pointers: */
issm/trunk-jpl/src/c/modules/SurfaceLogVelMisfitx/SurfaceLogVelMisfitx.cpp
(r8607 → r12102)
  /*Sum all J from all cpus of the cluster:*/
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  J=J_sum;
+ #endif

  /*Assign output pointers: */
issm/trunk-jpl/src/c/modules/SurfaceLogVxVyMisfitx/SurfaceLogVxVyMisfitx.cpp
(r8607 → r12102)
  /*Sum all J from all cpus of the cluster:*/
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  J=J_sum;
+ #endif

  /*Assign output pointers: */
issm/trunk-jpl/src/c/modules/SurfaceRelVelMisfitx/SurfaceRelVelMisfitx.cpp
(r8607 → r12102)
  /*Sum all J from all cpus of the cluster:*/
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  J=J_sum;
+ #endif

  /*Assign output pointers: */
issm/trunk-jpl/src/c/modules/ThicknessAbsGradientx/ThicknessAbsGradientx.cpp
(r8608 → r12102)
  /*Sum all J from all cpus of the cluster:*/
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  J=J_sum;
+ #endif

  /*Assign output pointers: */
issm/trunk-jpl/src/c/modules/ThicknessAbsMisfitx/ThicknessAbsMisfitx.cpp
(r8607 → r12102)
  /*Sum all J from all cpus of the cluster:*/
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&J,&J_sum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&J_sum,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  J=J_sum;
+ #endif

  /*Assign output pointers: */
issm/trunk-jpl/src/c/modules/TimeAdaptx/TimeAdaptx.cpp
(r12016 → r12102)
  /*Figure out minimum across the cluster: */
+ #ifdef _HAVE_MPI_
  MPI_Reduce (&min_dt,&node_min_dt,1,MPI_DOUBLE,MPI_MIN,0,MPI_COMM_WORLD );
  MPI_Bcast(&node_min_dt,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
  min_dt=node_min_dt;
+ #endif

  /*Assign output pointers:*/
issm/trunk-jpl/src/c/objects/Bamg/Metric.h
(r9690 → r12102)
  #include "../../shared/Bamg/shared.h"
  #include "R2.h"
+ #include <math.h>

  namespace bamg {
issm/trunk-jpl/src/c/objects/IoModel.cpp
(r10390 → r12102)
  /*Ok, we have reached the end of the file. break: */
  record_code=0; //0 means bailout
+ #ifdef _HAVE_MPI_
  MPI_Bcast(&record_code,1,MPI_INT,0,MPI_COMM_WORLD); /*tell others cpus we are bailing: */
+ #endif
  break;
  }
...
  fread(&record_code,sizeof(int),1,this->fid);

+ #ifdef _HAVE_MPI_
  /*Tell other cpus what we are doing: */
  MPI_Bcast(&record_code,1,MPI_INT,0,MPI_COMM_WORLD); /*tell other cpus what we are going to do: */
...
  MPI_Bcast(&record_enum,1,MPI_INT,0,MPI_COMM_WORLD);
  MPI_Bcast(&record_length,1,MPI_INT,0,MPI_COMM_WORLD);
+ #endif

  switch(record_code){
  case 1:
  /*Read the boolean and broadcast it to other cpus:*/
  if(fread(&booleanint,sizeof(int),1,this->fid)!=1) _error_(" could not read boolean ");
+ #ifdef _HAVE_MPI_
  MPI_Bcast(&booleanint,1,MPI_INT,0,MPI_COMM_WORLD);
+ #endif

  /*create BoolParam: */
...
  /*Read the integer and broadcast it to other cpus:*/
  if(fread(&integer,sizeof(int),1,this->fid)!=1) _error_(" could not read integer ");
+ #ifdef _HAVE_MPI_
  MPI_Bcast(&integer,1,MPI_INT,0,MPI_COMM_WORLD);
+ #endif

  /*create IntParam: */
...
  /*Read the scalar and broadcast it to other cpus:*/
  if(fread(&scalar,sizeof(double),1,this->fid)!=1) _error_(" could not read scalar ");
+ #ifdef _HAVE_MPI_
  MPI_Bcast(&scalar,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
+ #endif

  /*create DoubleParam: */
...
  /*We have to read a string from disk. First read the dimensions of the string, then the string: */
  if(fread(&string_size,sizeof(int),1,this->fid)!=1) _error_(" could not read length of string ");
+ #ifdef _HAVE_MPI_
  MPI_Bcast(&string_size,1,MPI_INT,0,MPI_COMM_WORLD);
+ #endif

  if(string_size){
...
  /*Read string, then broadcast: */
  if(fread(string,string_size*sizeof(char),1,this->fid)!=1)_error_(" could not read string ");
+ #ifdef _HAVE_MPI_
  MPI_Bcast(string,string_size,MPI_CHAR,0,MPI_COMM_WORLD);
+ #endif
  }
  else{
...
  }
  } //}}}
+ #ifdef _HAVE_MPI_
  else{ //cpu ~0 {{{2
  for(;;){ //wait on cpu 0
...
  }
  } //}}}
+ #endif
  }
  /*}}}*/
...
  if(fread(&booleanint,sizeof(int),1,fid)!=1) _error_(" could not read boolean ");
  }
+ #ifdef _HAVE_MPI_
  MPI_Bcast(&booleanint,1,MPI_INT,0,MPI_COMM_WORLD);
+ #endif

  /*cast to bool: */
...
  }

+ #ifdef _HAVE_MPI_
  MPI_Bcast(&integer,1,MPI_INT,0,MPI_COMM_WORLD);
+ #endif

  /*Assign output pointers: */
...
  if(fread(&scalar,sizeof(double),1,fid)!=1)_error_(" could not read scalar ");
  }
+ #ifdef _HAVE_MPI_
  MPI_Bcast(&scalar,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
+ #endif

  /*Assign output pointers: */
...
  }

+ #ifdef _HAVE_MPI_
  MPI_Bcast(&string_size,1,MPI_INT,0,MPI_COMM_WORLD);
+ #endif

  /*Now allocate string: */
...
  if(fread(string,string_size*sizeof(char),1,fid)!=1)_error_(" could not read string ");
  }
+ #ifdef _HAVE_MPI_
  MPI_Bcast(string,string_size,MPI_CHAR,0,MPI_COMM_WORLD);
+ #endif
  }
  else{
...
  }

+ #ifdef _HAVE_MPI_
  MPI_Bcast(&M,1,MPI_INT,0,MPI_COMM_WORLD);
+ #endif

  if(my_rank==0){
  if(fread(&N,sizeof(int),1,fid)!=1) _error_("could not read number of columns for matrix ");
  }
- MPI_Bcast(&N,1,MPI_INT,0,MPI_COMM_WORLD);
+ #ifdef _HAVE_MPI_
+ MPI_Bcast(&N,1,MPI_INT,0,MPI_COMM_WORLD);
+ #endif

  /*Now allocate matrix: */
...
  }

+ #ifdef _HAVE_MPI_
  MPI_Bcast(matrix,M*N,MPI_DOUBLE,0,MPI_COMM_WORLD);
+ #endif
  }
...
  if(fread(&M,sizeof(int),1,fid)!=1) _error_("could not read number of rows for matrix ");
  }
+ #ifdef _HAVE_MPI_
  MPI_Bcast(&M,1,MPI_INT,0,MPI_COMM_WORLD);
+ #endif

  if(my_rank==0){
  if(fread(&N,sizeof(int),1,fid)!=1) _error_("could not read number of columns for matrix ");
  }
+ #ifdef _HAVE_MPI_
  MPI_Bcast(&N,1,MPI_INT,0,MPI_COMM_WORLD);
+ #endif

  /*Now allocate matrix: */
...
  if(fread(matrix,M*N*sizeof(double),1,fid)!=1) _error_("could not read matrix ");
  }
+ #ifdef _HAVE_MPI_
  MPI_Bcast(matrix,M*N,MPI_DOUBLE,0,MPI_COMM_WORLD);
+ #endif
  }
...
  if(fread(&numstrings,sizeof(int),1,fid)!=1) _error_(" could not read length of string array");
  }
+ #ifdef _HAVE_MPI_
  MPI_Bcast(&numstrings,1,MPI_INT,0,MPI_COMM_WORLD);
+ #endif

  /*Now allocate string array: */
...
  if(fread(&string_size,sizeof(int),1,fid)!=1) _error_(" could not read length of string ");
  }
+ #ifdef _HAVE_MPI_
  MPI_Bcast(&string_size,1,MPI_INT,0,MPI_COMM_WORLD);
+ #endif
  if(string_size){
  string=(char*)xmalloc((string_size+1)*sizeof(char));
...
  if(fread(string,string_size*sizeof(char),1,fid)!=1)_error_(" could not read string ");
  }
+ #ifdef _HAVE_MPI_
  MPI_Bcast(string,string_size,MPI_CHAR,0,MPI_COMM_WORLD);
+ #endif
  }
  else{
...
  if(fread(&numrecords,sizeof(int),1,fid)!=1) _error_("could not read number of records in matrix array ");
  }
+ #ifdef _HAVE_MPI_
  MPI_Bcast(&numrecords,1,MPI_INT,0,MPI_COMM_WORLD);
+ #endif

  if(numrecords){
...
  if(fread(&M,sizeof(int),1,fid)!=1) _error_("%s%i%s","could not read number of rows in ",i,"th matrix of matrix array");
  }
+ #ifdef _HAVE_MPI_
  MPI_Bcast(&M,1,MPI_INT,0,MPI_COMM_WORLD);
+ #endif

  if(my_rank==0){
  if(fread(&N,sizeof(int),1,fid)!=1) _error_("%s%i%s","could not read number of columns in ",i,"th matrix of matrix array");
  }
+ #ifdef _HAVE_MPI_
  MPI_Bcast(&N,1,MPI_INT,0,MPI_COMM_WORLD);
+ #endif

  /*Now allocate matrix: */
...
  }

+ #ifdef _HAVE_MPI_
  MPI_Bcast(matrix,M*N,MPI_DOUBLE,0,MPI_COMM_WORLD);
+ #endif
  }
...
  }
  }
+ #ifdef _HAVE_MPI_
  MPI_Bcast(&found,1,MPI_INT,0,MPI_COMM_WORLD);
  if(!found)_error_("%s %s ","could not find data with name",EnumToStringx(data_enum));
+ #endif

  /*Broadcast code and vector type: */
+ #ifdef _HAVE_MPI_
  MPI_Bcast(&record_code,1,MPI_INT,0,MPI_COMM_WORLD);
  MPI_Bcast(&vector_type,1,MPI_INT,0,MPI_COMM_WORLD);
  if(record_code==5) MPI_Bcast(&vector_type,1,MPI_INT,0,MPI_COMM_WORLD);
+ #endif

  /*Assign output pointers:*/
issm/trunk-jpl/src/c/objects/Patch.cpp
(r12014 → r12102)
  int node_numrows;
  double *total_values = NULL;
+ #ifdef _HAVE_MPI_
  MPI_Status status;
+ #endif

  /*First, figure out total number of rows combining all the cpus: */
+ #ifdef _HAVE_MPI_
  MPI_Reduce(&this->numrows,&total_numrows,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD );
  MPI_Bcast(&total_numrows,1,MPI_INT,0,MPI_COMM_WORLD);
+ #else
+ total_numrows=this->numrows;
+ #endif

  /*return if patch empty*/
...
  /*Now, ask other nodes to send their values: */
+ #ifdef _HAVE_MPI_
  for (i=1;i<num_procs;i++){
  if (my_rank==i){
...
  }
  }
+ #endif

  /*Now, node 0 has total_values, of size total_numrows*this->numcols. Update the fields in the patch, to reflect this new
...
  this->values=total_values;
  }
+ #ifdef _HAVE_MPI_
  else{
  this->numrows=0;
  xfree((void**)&this->values);
  }
+ #endif
  }/*}}}*/
issm/trunk-jpl/src/c/solutions/issm.cpp
(r12022 → r12102)
  if(ierr) _error_("Could not initialize Petsc");
  #else
+ #ifdef _HAVE_MPI_
  MPI_Init(&argc,&argv);
  #endif
+ #endif

+ #ifdef _HAVE_MPI_
  MPI_Barrier(MPI_COMM_WORLD); start=MPI_Wtime();
+ #else
+ start=(double)clock();
+ #endif

  /*Size and rank: */
+ #ifdef _HAVE_MPI_
  MPI_Comm_rank(MPI_COMM_WORLD,&my_rank);
  MPI_Comm_size(MPI_COMM_WORLD,&num_procs);
+ #endif

  /*First process inputs*/
...
  /*Create femmodel, using input file: */
+ #ifdef _HAVE_MPI_
  MPI_Barrier(MPI_COMM_WORLD); start_init=MPI_Wtime();
+ #else
+ start_init=(double)clock();
+ #endif
  femmodel=new FemModel(binfilename,outbinfilename,solution_type,analyses,numanalyses);
...
  femmodel->parameters->FindParam(&control_analysis,InversionIscontrolEnum);
  femmodel->parameters->FindParam(&tao_analysis,InversionTaoEnum);
+ #ifdef _HAVE_MPI_
  MPI_Barrier(MPI_COMM_WORLD); finish_init=MPI_Wtime();
+ #else
+ finish_init=(double)clock();
+ #endif

  _printf_(true,"call computational core:\n");
+ #ifdef _HAVE_MPI_
  MPI_Barrier(MPI_COMM_WORLD); start_core=MPI_Wtime( );
+ #else
+ start_core=(double)clock();
+ #endif

  if(dakota_analysis){
  #ifdef _HAVE_DAKOTA_
...
  solutioncore(femmodel);
  }
+ #ifdef _HAVE_MPI_
  MPI_Barrier(MPI_COMM_WORLD); finish_core=MPI_Wtime( );
+ #else
+ finish_core=(double)clock();
+ #endif

  _printf_(true,"write results to disk:\n");
  OutputResultsx(femmodel->elements, femmodel->nodes, femmodel->vertices, femmodel->loads, femmodel->materials, femmodel->parameters,femmodel->results);
...
  /*Get finish time and close*/
+ #ifdef _HAVE_MPI_
  MPI_Barrier(MPI_COMM_WORLD); finish = MPI_Wtime( );
  _printf_(true,"\n %-34s %f seconds \n","FemModel initialization elapsed time:",finish_init-start_init);
  _printf_(true," %-34s %f seconds \n","Core solution elapsed time:",finish_core-start_core);
  _printf_(true,"\n %s %i hrs %i min %i sec\n\n","Total elapsed time:",int((finish-start)/3600),int(int(finish-start)%3600/60),int(finish-start)%60);
+ #else
+ finish=(double)clock();
+ _printf_(true,"\n %-34s %f seconds \n","FemModel initialization elapsed time:",(finish_init-start_init)/CLOCKS_PER_SEC);
+ _printf_(true," %-34s %f seconds \n","Core solution elapsed time:",(finish_core-start_core)/CLOCKS_PER_SEC);
+ _printf_(true,"\n %s %i hrs %i min %i sec\n\n","Total elapsed time:",int((finish-start)/3600/CLOCKS_PER_SEC),int(int((finish-start)/CLOCKS_PER_SEC)%3600/60),(int(finish-start)/CLOCKS_PER_SEC)%60);
+ #endif

+ #ifdef _HAVE_PETSC_
  _printf_(true,"closing MPI and Petsc\n");
- #ifdef _HAVE_PETSC_
  PetscFinalize();
  #else
+ #ifdef _HAVE_MPI_
+ _printf_(true,"closing MPI and Petsc\n");
  MPI_Finalize();
+ #endif
  #endif
issm/trunk-jpl/src/c/toolkits/petsc/patches/PetscMatrixToDoubleMatrix.cpp
(r12101 → r12102)
  #endif

+ #include <string>

  /*Petsc includes: */
...
  /*Petsc includes: */
- #include "mex.h"
-
- #include "../../shared/shared.h"
- #include <string>
+ #include "../../../shared/shared.h"
issm/trunk-jpl/src/c/toolkits/petsc/patches/PetscVectorToDoubleVector.cpp
(r12101 → r12102)
  #endif

+ #include <string>
+
  /*Petsc includes: */
  #include "petscmat.h"
...
  #include "petscksp.h"

- #include "mex.h"
- #include "../../shared/shared.h"
- #include <string>
+ #include "../../../shared/shared.h"

  void PetscVectorToDoubleVector(double** pvector, int* prows, Vec petsc_vector){
...
  int *idxm = NULL;
  double *vector = NULL;
-
- /*output: */
- mxArray* dataref=NULL;

  /*Get size of vector: */
issm/trunk-jpl/src/c/toolkits/toolkits.h
(r12017 → r12102)
  #endif

+ #ifdef _HAVE_MPI_
  #include "./mpi/mpiincludes.h"
+ #endif
+
+ #ifdef _HAVE_METIS_
  #include "./metis/metisincludes.h"
+ #endif
+
  #include "./triangle/triangleincludes.h"
  #include "./toolkitsenums.h"
issm/trunk-jpl/startup.py
(r12066 → r12102)
  from miscellaneous import *
  from private import *
+ from triangle import *

  #}}}
issm/trunk-jpl/test/NightlyRun/test101.py
(r12047 → r12102)
  from model import *
  from TriMesh import *
+ from triangle import *

- #md=model()
- #md=triangle(md,'../Exp/Square.exp',150000);
+ md=model()
+ md=triangle(md,'../Exp/Square.exp',150000);

- [index,x,y,segments,segmentmarkers]=TriMesh('../Exp/Square.exp',15000.0**2.0,True)
- print(index,x,y,segments,segmentmarkers)
+ #[index,x,y,segments,segmentmarkers]=TriMesh('../Exp/Square.exp',15000.0**2.0,True)
+ #print(index,x,y,segments,segmentmarkers)