Changeset 13601
Timestamp: 10/10/12 22:53:28
Location: issm/trunk-jpl/src/c/toolkits/petsc/patches
Files: 3 edited
issm/trunk-jpl/src/c/toolkits/petsc/patches/GetOwnershipBoundariesFromRange.cpp (r13595 -> r13601)

@@ -17,7 +17,11 @@
 
    /*externals :*/
-   extern int my_rank;
-   extern int num_procs;
+   int my_rank2;
+   int num_procs2;
+
+   /*recover my_rank2 and num_procs2:*/
+   MPI_Comm_size(comm,&num_procs2);
+   MPI_Comm_rank(comm,&my_rank2);
 
    /*output: */
    int lower_row,upper_row;
@@ -28,5 +32,5 @@
 
    /*Gather all range values into allranges, for all nodes*/
-   allranges=xNew<int>(num_procs);
+   allranges=xNew<int>(num_procs2);
    MPI_Allgather(&range,1,MPI_INT,allranges,1,MPI_INT,comm);
 
@@ -34,5 +38,5 @@
    lower_row=0;
    upper_row=lower_row+allranges[0];
-   for (i=1;i<=my_rank;i++){
+   for (i=1;i<=my_rank2;i++){
       lower_row=lower_row+allranges[i-1];
       upper_row=upper_row+allranges[i];
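Across all three files the change is the same: the extern globals my_rank/num_procs are dropped and rank and size are recovered from the communicator passed in, so the routines no longer assume MPI_COMM_WORLD. A minimal sketch of the ownership computation under that convention (hypothetical function name, std::vector standing in for ISSM's xNew allocator):

    #include <mpi.h>
    #include <vector>

    /* range: number of rows owned by this rank. On return, [*lower_row,*upper_row)
     * is this rank's slice of the global row numbering. */
    void GetOwnershipBoundaries(int* lower_row,int* upper_row,int range,MPI_Comm comm){
        int my_rank,num_procs;
        MPI_Comm_rank(comm,&my_rank);   /* recovered from comm, not from a global */
        MPI_Comm_size(comm,&num_procs);

        /* gather every rank's row count */
        std::vector<int> allranges(num_procs);
        MPI_Allgather(&range,1,MPI_INT,allranges.data(),1,MPI_INT,comm);

        /* accumulate counts of the ranks below me */
        *lower_row=0;
        *upper_row=allranges[0];
        for(int i=1;i<=my_rank;i++){
            *lower_row+=allranges[i-1];
            *upper_row+=allranges[i];
        }
    }

Because every quantity is derived from comm, the gathered ranges and the rank used to index them always refer to the same communicator, which keeps the arithmetic correct on sub-communicators as well.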
issm/trunk-jpl/src/c/toolkits/petsc/patches/MatToSerial.cpp (r13595 -> r13601)

@@ -16,6 +16,6 @@
 
    int i;
-   extern int my_rank;
-   extern int num_procs;
+   int my_rank2;
+   int num_procs2;
 
    /*Petsc variables*/
@@ -29,4 +29,8 @@
    int buffer[3];
 
+   /*recover my_rank2 and num_procs2:*/
+   MPI_Comm_size(comm,&num_procs2);
+   MPI_Comm_rank(comm,&my_rank2);
+
    /*Output*/
    double* outmatrix=NULL;
@@ -41,5 +45,5 @@
 
    /*Local and global allocation*/
-   if (my_rank==0)outmatrix=xNew<double>(M*N);
+   if (my_rank2==0)outmatrix=xNew<double>(M*N);
 
    if (range){
@@ -61,7 +65,7 @@
     * We send these rows to the matrix on node 0*/
 
-   for (i=1;i<num_procs;i++){
-      if (my_rank==i){
-         buffer[0]=my_rank;
+   for (i=1;i<num_procs2;i++){
+      if (my_rank2==i){
+         buffer[0]=my_rank2;
          buffer[1]=lower_row;
          buffer[2]=range;
@@ -69,10 +73,10 @@
          if (range)MPI_Send(local_matrix,N*range,MPI_DOUBLE,0,1,comm);
       }
-      if (my_rank==0){
+      if (my_rank2==0){
         MPI_Recv(buffer,3,MPI_INT,i,1,comm,&status);
         if (buffer[2])MPI_Recv(outmatrix+(buffer[1]*N),N*buffer[2],MPI_DOUBLE,i,1,comm,&status);
       }
    }
-   if (my_rank==0){
+   if (my_rank2==0){
       //Still have the local_matrix on node 0 to take care of.
       memcpy(outmatrix,local_matrix,N*range*sizeof(double));
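For reference, the gather that MatToSerial performs can be read as the following standalone sketch (illustrative names, not the ISSM source): every rank above 0 sends a three-int header (rank, first global row, row count) followed by its rows; rank 0 drops each block into the full M-by-N array at the offset named in the header and finally copies its own block with memcpy.

    #include <mpi.h>
    #include <cstring>
    #include <vector>

    /* local_rows: this rank's block (range rows x N columns, row-major);
     * lower_row : global index of the first local row. */
    std::vector<double> GatherMatrixOnRoot(double* local_rows,int range,int lower_row,
                                           int M,int N,MPI_Comm comm){
        int my_rank,num_procs;
        MPI_Comm_rank(comm,&my_rank);
        MPI_Comm_size(comm,&num_procs);

        std::vector<double> out;
        if(my_rank==0) out.resize((size_t)M*N);

        for(int i=1;i<num_procs;i++){
            if(my_rank==i){
                int buffer[3]={my_rank,lower_row,range};        /* header: who, where, how many */
                MPI_Send(buffer,3,MPI_INT,0,1,comm);
                if(range) MPI_Send(local_rows,N*range,MPI_DOUBLE,0,1,comm);
            }
            if(my_rank==0){
                int buffer[3];
                MPI_Status status;
                MPI_Recv(buffer,3,MPI_INT,i,1,comm,&status);
                if(buffer[2]) MPI_Recv(out.data()+(size_t)buffer[1]*N,N*buffer[2],MPI_DOUBLE,i,1,comm,&status);
            }
        }
        /* rank 0 still has to place its own rows */
        if(my_rank==0 && range) std::memcpy(out.data()+(size_t)lower_row*N,local_rows,(size_t)N*range*sizeof(double));
        return out;   /* full matrix on rank 0, empty elsewhere */
    }

Sending the header first lets rank 0 size and place each incoming block without knowing the row distribution in advance.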
issm/trunk-jpl/src/c/toolkits/petsc/patches/VecToMPISerial.cpp (r13595 -> r13601)

@@ -16,6 +16,6 @@
 
    int i;
-   extern int num_procs;
-   extern int my_rank;
+   int num_procs2;
+   int my_rank2;
 
    /*Petsc*/
@@ -34,4 +34,8 @@
    /*Output*/
    double* gathered_vector=NULL; //Global vector holding the final assembled vector on all nodes.
+
+   /*recover my_rank2 and num_procs2*/
+   MPI_Comm_size(comm,&num_procs2);
+   MPI_Comm_rank(comm,&my_rank2);
 
    VecGetSize(vector,&vector_size);
@@ -61,7 +65,7 @@
    /*Now each node holds its local_vector containing range rows.
     * We send this local_vector to the gathered_vector on node 0*/
-   for (i=1;i<num_procs;i++){
-      if (my_rank==i){
-         buffer[0]=my_rank;
+   for (i=1;i<num_procs2;i++){
+      if (my_rank2==i){
+         buffer[0]=my_rank2;
          buffer[1]=lower_row;
          buffer[2]=range;
@@ -69,5 +73,5 @@
         if (range)MPI_Send(local_vector,range,MPI_DOUBLE,0,1,comm);
       }
-      if (my_rank==0){
+      if (my_rank2==0){
         MPI_Recv(buffer,3,MPI_INT,i,1,comm,&status);
         if (buffer[2])MPI_Recv(gathered_vector+buffer[1],buffer[2],MPI_DOUBLE,i,1,comm,&status);
@@ -75,5 +79,5 @@
      }
   }
 
-   if (my_rank==0){
+   if (my_rank2==0){
      //Still have the local_vector on node 0 to take care of.
      if (range)memcpy(gathered_vector+lower_row,local_vector,range*sizeof(double));
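VecToMPISerial applies the same header-plus-payload scheme in one dimension. Purely as a point of comparison (this is not what the ISSM code does), the same gather can be expressed with a single collective once the per-rank counts are known; a sketch with illustrative names:

    #include <mpi.h>
    #include <vector>

    /* local: this rank's piece (range entries). Returns the assembled vector on
     * rank 0 and an empty vector on every other rank. */
    std::vector<double> GatherVectorOnRoot(double* local,int range,MPI_Comm comm){
        int my_rank,num_procs;
        MPI_Comm_rank(comm,&my_rank);
        MPI_Comm_size(comm,&num_procs);

        /* counts and displacements are only needed on the root */
        std::vector<int> counts,displs;
        if(my_rank==0){ counts.resize(num_procs); displs.resize(num_procs); }
        MPI_Gather(&range,1,MPI_INT,counts.data(),1,MPI_INT,0,comm);

        std::vector<double> gathered;
        if(my_rank==0){
            int total=0;
            for(int i=0;i<num_procs;i++){ displs[i]=total; total+=counts[i]; }
            gathered.resize(total);
        }
        MPI_Gatherv(local,range,MPI_DOUBLE,gathered.data(),counts.data(),displs.data(),MPI_DOUBLE,0,comm);
        return gathered;
    }

The hand-rolled Send/Recv loop in the patched code makes the per-rank offsets explicit, while MPI_Gatherv leaves the staging to the MPI library; both yield the full vector on rank 0 only.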