Changeset 13609
Timestamp: 10/10/12 23:53:10
Location: issm/trunk-jpl/src/c
Files: 11 edited
issm/trunk-jpl/src/c/classes/FemModel.cpp (r13589 → r13609)

      FILE *petscoptionsfid = NULL;
      FILE *output_fid = NULL;
    - extern int my_rank;
    + int my_rank2;
    +
    + /*recover my_rank2:*/
    + my_rank2=IssmComm::GetRank();

      /*Open input file on cpu 0: */
    - if(my_rank==0) IOMODEL = pfopen(inputfilename ,"rb");
    + if(my_rank2==0) IOMODEL = pfopen(inputfilename ,"rb");

      /*Initialize internal data: */
      ...
      /*Close input file descriptors: */
    - if(my_rank==0) pfclose(IOMODEL,inputfilename);
    + if(my_rank2==0) pfclose(IOMODEL,inputfilename);

      /*Open output file once for all and add output file name and file descriptor to parameters*/
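The pattern repeated across this changeset is visible here: the file-scope `extern int my_rank` is dropped and the rank is recovered locally through `IssmComm::GetRank()`. The changeset itself only shows the accessor calls (`GetRank()`, `GetSize()`, `GetComm()`) and the `COMM IssmComm::comm;` definition in globals.h, so the following is a minimal sketch of what such a static communicator wrapper could look like, assuming plain MPI; it is not ISSM's actual IssmComm header, and the class name and SetComm member are hypothetical.

    #include <mpi.h>

    /*Sketch of a static communicator wrapper in the spirit of IssmComm:
     * the communicator is stored once, and rank/size are queried on demand
     * instead of being cached in mutable globals.*/
    class CommWrapper{
        private:
            static MPI_Comm comm;
        public:
            static void     SetComm(MPI_Comm c){comm=c;}
            static MPI_Comm GetComm(void){return comm;}
            static int GetRank(void){
                int rank=0;
                MPI_Comm_rank(comm,&rank);   /*ask MPI every time*/
                return rank;
            }
            static int GetSize(void){
                int size=1;
                MPI_Comm_size(comm,&size);
                return size;
            }
    };
    MPI_Comm CommWrapper::comm=MPI_COMM_WORLD;

Routing every query through one accessor removes the risk of stale or duplicated declarations, such as the doubled `extern int my_rank;` cleaned up in GenericExternalResult.h below.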
issm/trunk-jpl/src/c/classes/Patch.cpp (r13607 → r13609)

      int i;
      int count;
    - extern int my_rank;
    - extern int num_procs;
    + int my_rank2;
    + int num_procs2;
      int total_numrows;
      int node_numrows;
      ...
      MPI_Status status;
      #endif
    +
    + /*recover my_rank2:*/
    + my_rank2=IssmComm::GetRank();
    + num_procs2=IssmComm::GetSize();

      /*First, figure out total number of rows combining all the cpus: */
      ...
      /*Now, allocate buffer to holds all the values, on node 0: */
    - if(my_rank==0)total_values=xNew<IssmDouble>(this->numcols*total_numrows);
    + if(my_rank2==0)total_values=xNew<IssmDouble>(this->numcols*total_numrows);

      /*Start by copying node 0 values onto total_values: */
    - if(my_rank==0){
    + if(my_rank2==0){
          count=0;
          xMemCpy<IssmDouble>(total_values+count,this->values,this->numcols*this->numrows);
      ...
      /*Now, ask other nodes to send their values: */
      #ifdef _HAVE_MPI_
    - for (i=1;i<num_procs;i++){
    -     if (my_rank==i){
    + for (i=1;i<num_procs2;i++){
    +     if (my_rank2==i){
              MPI_Send(&this->numrows,1,MPI_INT,0,1,IssmComm::GetComm());
              if (this->numrows)MPI_Send(this->values,this->numrows*this->numcols,MPI_DOUBLE,0,1,IssmComm::GetComm());
          }
    -     if (my_rank==0){
    +     if (my_rank2==0){
              MPI_Recv(&node_numrows,1,MPI_INT,i,1,IssmComm::GetComm(),&status);
              if (node_numrows)MPI_Recv(total_values+count,node_numrows*this->numcols,MPI_DOUBLE,i,1,IssmComm::GetComm(),&status);
      ...
      /*Now, node 0 has total_values, of size total_numrows*this->numcols. Update the fields in the patch, to reflect this new
       * reality. For other cpus, no point in keeping their data anymore: */
    - if(my_rank==0){
    + if(my_rank2==0){
          this->numrows=total_numrows;
          xDelete<IssmDouble>(this->values);
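The gather logic itself is untouched by r13609; only the way the rank, size, and communicator are obtained changes. For reference, here is a self-contained sketch of the same gather-to-rank-0 pattern, assuming plain MPI with std::vector standing in for ISSM's IssmDouble buffers and xNew/xMemCpy helpers: every rank above 0 sends its row count and then its values to rank 0, which appends them to one flat buffer.

    #include <mpi.h>
    #include <cstdio>
    #include <vector>

    int main(int argc,char** argv){
        MPI_Init(&argc,&argv);
        MPI_Comm comm=MPI_COMM_WORLD;
        int rank,nprocs;
        MPI_Comm_rank(comm,&rank);
        MPI_Comm_size(comm,&nprocs);

        const int ncols=3;
        int numrows=rank+1;   /*each rank owns a different number of rows*/
        std::vector<double> values(numrows*ncols,double(rank));

        if(rank==0){
            /*start with rank 0's own rows, then append every other rank's block*/
            std::vector<double> total(values);
            for(int i=1;i<nprocs;i++){
                int node_numrows=0;
                MPI_Recv(&node_numrows,1,MPI_INT,i,1,comm,MPI_STATUS_IGNORE);
                std::vector<double> buf(node_numrows*ncols);
                if(node_numrows) MPI_Recv(buf.data(),node_numrows*ncols,MPI_DOUBLE,i,1,comm,MPI_STATUS_IGNORE);
                total.insert(total.end(),buf.begin(),buf.end());
            }
            printf("gathered %d rows on rank 0\n",(int)(total.size()/ncols));
        }
        else{
            MPI_Send(&numrows,1,MPI_INT,0,1,comm);
            if(numrows) MPI_Send(values.data(),numrows*ncols,MPI_DOUBLE,0,1,comm);
        }

        MPI_Finalize();
        return 0;
    }

MPI_Gatherv could produce the same result collectively, but the point-to-point loop mirrors the structure Patch::Gather uses.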
issm/trunk-jpl/src/c/classes/classes.h (r13589 → r13609) — whitespace-only change, one blank line added

      #ifndef ALL_CLASSES_H_
      #define ALL_CLASSES_H_

    +
      /*Objects derived classes, which are used in our containers: */
issm/trunk-jpl/src/c/classes/objects/ExternalResults/GenericExternalResult.h (r13414 → r13609)

      #include "../../../io/io.h"
      #include "../../../EnumDefinitions/EnumDefinitions.h"
    + #include "../../../classes/IssmComm.h"
      #include "./ExternalResult.h"
      /*}}}*/
      ...
      void WriteData(FILE* fid,bool io_gather){ /*{{{*/

    - extern int my_rank;
    + int my_rank2;
      int type;
      int size;
      IssmPDouble passiveDouble;
    +
    + /*recover my_rank2:*/
    + my_rank2=IssmComm::GetRank();

      /*return if now on cpu 0: */
    - if(my_rank)return;
    + if(my_rank2)return;

      /*use generic part, same for all ResultTypes: */
      ...
      template <> inline void GenericExternalResult<char*>::WriteData(FILE* fid,bool io_gather){ /*{{{*/

    - extern int my_rank;
    + int my_rank2;
      int type;
      int length;
    +
    + /*recover my_rank2:*/
    + my_rank2=IssmComm::GetRank();

      /*return if now on cpu 0: */
    - if(my_rank)return;
    + if(my_rank2)return;

      /*use generic part, same for all ResultTypes: */
      ...
      template <> inline void GenericExternalResult<IssmPDouble*>::WriteData(FILE* fid,bool io_gather){ /*{{{*/

    - extern int my_rank;
    + int my_rank2;
      int length;
      int type;
      int rows,cols;
      char *name = NULL;
    - extern int my_rank;
      IssmPDouble passiveDouble;
    +
    + /*recover my_rank2:*/
    + my_rank2=IssmComm::GetRank();

      if(io_gather){
          /*we are gathering the data on cpu 0, don't write on other cpus: */
    -     if(my_rank) return;
    +     if(my_rank2) return;
      }
issm/trunk-jpl/src/c/include/globals.h (r13589 → r13609)

      #include "./types.h"
      #include "../classes/IssmComm.h"
    +
      COMM IssmComm::comm;

    - int my_rank=0;
    - int num_procs=1;
    + //int my_rank=0;
    + //int num_procs=1;

      #endif
issm/trunk-jpl/src/c/io/PrintfFunction.cpp (r13412 → r13609)

      char *buffer = NULL;
      int n,size = 100;
    - int        string_size;
    - extern int my_rank;
    + int string_size;
    + int my_rank2;
      //variable list of arguments
      va_list args;
    +
    + /*recover my_rank2:*/
    + my_rank2=IssmComm::GetRank();

      while(true){
      ...
      /*Ok, if we are running in parallel, get node 0 to print*/
    - if(my_rank==0)_printString_(buffer);
    + if(my_rank2==0)_printString_(buffer);

      /*Clean up and return*/
      ...
      }
      int PrintfFunction(const string & message){
    -     extern int my_rank;
    -     if(my_rank==0){
    +     int my_rank2;
    +
    +     /*recover my_rank2:*/
    +     my_rank2=IssmComm::GetRank();
    +
    +     if(my_rank2==0){
              printf("%s\n",message.c_str());
          }
      ...
      }
      int PrintfFunction2(const string & message){
    -     extern int my_rank;
    -     if(my_rank==0){
    +     int my_rank2;
    +
    +     /*recover my_rank2:*/
    +     my_rank2=IssmComm::GetRank();
    +
    +     if(my_rank2==0){
              printf("%s",message.c_str());
          }
issm/trunk-jpl/src/c/matlab/io/PrintfFunction.cpp (r13412 → r13609)

      int n,size = 100;
      int string_size;
    - extern int my_rank;
    + int my_rank2;
    +
    + /*recover my_rank2:*/
    + my_rank2=IssmComm::GetRank();

      //variable list of arguments
      ...
      /*Ok, if we are running in parallel, get node 0 to print*/
    - if(my_rank==0)_printString_(buffer);
    + if(my_rank2==0)_printString_(buffer);

      /*Clean up and return*/
issm/trunk-jpl/src/c/modules/OutputResultsx/OutputResultsx.cpp (r13325 → r13609)

      void OutputResultsx(Elements* elements, Nodes* nodes, Vertices* vertices, Loads* loads, Materials* materials, Parameters* parameters,Results* results){

    - extern int my_rank;
    + int my_rank2;
      FILE *fid = NULL;
      char *outputfilename = NULL;
      ...
      /*retrieve parameters: */
      parameters->FindParam(&dakota_analysis,QmuIsdakotaEnum);
    +
    + /*recover my_rank2:*/
    + my_rank2=IssmComm::GetRank();

      if(dakota_analysis){
      ...
      /*Results do not include the type of solution being run. In parallel, we output results to a filename,
       *therefore, we need to include the solutiontype into the filename: */
    - if(my_rank==0){
    + if(my_rank2==0){
          parameters->FindParam(&solutiontype,SolutionTypeEnum);
          EnumToStringx(&solutiontypestring,solutiontype);
      ...
      if(io_gather){
          /*Just open the file for output on cpu 0. We are gathering the data on cpu 0 from all other cpus: */
    -     if(my_rank==0) fid=pfopen(outputfilename ,"wb");
    +     if(my_rank2==0) fid=pfopen(outputfilename ,"wb");
      }
      else{
          /*We are opening different files for output on all cpus. Append the rank to the filename, and open: */
          parameters->FindParam(&fid,OutputFilePointerEnum);
    -     sprintf(cpu_outputfilename,"%s.%i",outputfilename,my_rank);
    +     sprintf(cpu_outputfilename,"%s.%i",outputfilename,my_rank2);
          fid=pfopen(cpu_outputfilename ,"wb");
      }
      ...
      if((step==1) && (time==0)){
          if(io_gather){
    -         if(my_rank==0) pfclose(fid,outputfilename);
    +         if(my_rank2==0) pfclose(fid,outputfilename);
          }
          else pfclose(fid,cpu_outputfilename);
issm/trunk-jpl/src/c/solutions/dakota_core.cpp (r13540 → r13609)

      #ifdef _HAVE_DAKOTA_ //only works if dakota library has been compiled in.

    - extern int my_rank;
    + int my_rank2;
      char* dakota_input_file = NULL;
      char* dakota_output_file = NULL;
      ...
      femmodel->parameters->FindParam(&dakota_output_file,QmuOutNameEnum);
      femmodel->parameters->FindParam(&dakota_error_file,QmuErrNameEnum);
    +
    + /*recover my_rank2:*/
    + my_rank2=IssmComm::GetRank();

    - if(my_rank==0){
    + if(my_rank2==0){

          // Instantiate/initialize the parallel library and problem description
issm/trunk-jpl/src/c/solutions/issm.cpp (r13589 → r13609)

      /*Hack for now: */
    - MPI_Comm_rank(comm_init,&my_rank);
    - MPI_Comm_size(comm_init,&num_procs);
    + //MPI_Comm_rank(comm_init,&my_rank);
    + //MPI_Comm_size(comm_init,&num_procs);

      /*Initialize femmodel from arguments provided command line: */
issm/trunk-jpl/src/c/solutions/kriging.cpp (r13554 → r13609)

      void ProcessArguments2(char** pbinfilename,char** poutbinfilename,char** plockfilename,char** prootpath,int argc,char **argv);
      void ProcessInputfile(IssmDouble **px,IssmDouble **py,IssmDouble **pdata,int *pnobs,IssmDouble **px_interp,IssmDouble **py_interp,int *pninterp,Options **poptions,FILE* fid);
    +
    + int my_rank;
    + int num_procs;

      int main(int argc,char **argv){
      ...
      char *outbinfilename = NULL;
      char *rootpath = NULL;
    +
    + MPI_Comm comm;
    + extern int my_rank;
    + extern int num_procs;

      /*Input*/
      ...
      /*Initialize environment (MPI, PETSC, MUMPS, etc ...)*/
    - EnvironmentInit(argc,argv);
    + comm=EnvironmentInit(argc,argv);
    +
    + MPI_Comm_size(comm,&num_procs);
    + MPI_Comm_rank(comm,&my_rank);

      /*First process inputs*/
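kriging.cpp now keeps its own file-scope my_rank/num_procs, filled from the communicator returned by EnvironmentInit rather than from the globals removed in globals.h. A minimal sketch of that startup sequence follows, assuming plain MPI; MPI_Init and MPI_COMM_WORLD stand in here for ISSM's EnvironmentInit, which wraps the environment setup and returns the COMM in the real driver.

    #include <mpi.h>

    int my_rank;
    int num_procs;

    int main(int argc,char** argv){
        MPI_Comm comm;

        /*Initialize environment and recover the communicator
         * (EnvironmentInit() plays this role in the real code)*/
        MPI_Init(&argc,&argv);
        comm=MPI_COMM_WORLD;

        MPI_Comm_size(comm,&num_procs);
        MPI_Comm_rank(comm,&my_rank);

        /*... process inputs and run the kriging driver here ...*/

        MPI_Finalize();
        return 0;
    }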