source: issm/oecreview/Archive/13393-13976/ISSM-13600-13601.diff@14312
Last change on this file since 14312 was 13980, checked in 12 years ago.
File size: 4.6 KB
../trunk-jpl/src/c/toolkits/petsc/patches/MatToSerial.cpp
 void MatToSerial(double** poutmatrix,Mat matrix,COMM comm){

 	int i;
-	extern int my_rank;
-	extern int num_procs;
+	int my_rank2;
+	int num_procs2;

 	/*Petsc variables*/
 	PetscInt lower_row,upper_row;
…
 	double* local_matrix=NULL; /*matrix local to each node used for temporary holding matrix values*/
 	int buffer[3];

+	/*recover my_rank2 and num_procs2:*/
+	MPI_Comm_size(comm,&num_procs2);
+	MPI_Comm_rank(comm,&my_rank2);
+
 	/*Output*/
 	double* outmatrix=NULL;
…
 	range=upper_row-lower_row+1;

 	/*Local and global allocation*/
-	if (my_rank==0)outmatrix=xNew<double>(M*N);
+	if (my_rank2==0)outmatrix=xNew<double>(M*N);

 	if (range){
 		local_matrix=xNew<double>(N*range);
…
 	/*Now each node holds its local_matrix containing range rows.
 	 * We send these rows to the matrix on node 0*/

-	for (i=1;i<num_procs;i++){
-		if (my_rank==i){
-			buffer[0]=my_rank;
+	for (i=1;i<num_procs2;i++){
+		if (my_rank2==i){
+			buffer[0]=my_rank2;
 			buffer[1]=lower_row;
 			buffer[2]=range;
 			MPI_Send(buffer,3,MPI_INT,0,1,comm);
 			if (range)MPI_Send(local_matrix,N*range,MPI_DOUBLE,0,1,comm);
 		}
-		if (my_rank==0){
+		if (my_rank2==0){
 			MPI_Recv(buffer,3,MPI_INT,i,1,comm,&status);
 			if (buffer[2])MPI_Recv(outmatrix+(buffer[1]*N),N*buffer[2],MPI_DOUBLE,i,1,comm,&status);
 		}
 	}
-	if (my_rank==0){
+	if (my_rank2==0){
 		//Still have the local_matrix on node 0 to take care of.
 		memcpy(outmatrix,local_matrix,N*range*sizeof(double));
 	}
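The common thread in all three patched files is that rank and size are now derived from the communicator passed into the function instead of from the global externs my_rank/num_procs. A minimal stand-alone sketch of that pattern follows; the helper name and the MPI_COMM_WORLD driver are illustrative only, not part of the patch.

#include <mpi.h>
#include <stdio.h>

/* Hypothetical helper: derive rank/size from whatever communicator the
 * caller hands in, as the patched ISSM functions now do. */
static void report_layout(MPI_Comm comm){
   int my_rank2, num_procs2;
   MPI_Comm_rank(comm, &my_rank2);   /* rank of this process within comm */
   MPI_Comm_size(comm, &num_procs2); /* number of processes in comm      */
   printf("process %d of %d\n", my_rank2, num_procs2);
}

int main(int argc, char** argv){
   MPI_Init(&argc, &argv);
   report_layout(MPI_COMM_WORLD); /* any communicator works, not just MPI_COMM_WORLD */
   MPI_Finalize();
   return 0;
}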
../trunk-jpl/src/c/toolkits/petsc/patches/VecToMPISerial.cpp
 int VecToMPISerial(double** pgathered_vector, Vec vector,COMM comm){

 	int i;
-	extern int num_procs;
-	extern int my_rank;
+	int num_procs2;
+	int my_rank2;

 	/*Petsc*/
 	MPI_Status status;
…

 	/*Output*/
 	double* gathered_vector=NULL; //Global vector holding the final assembled vector on all nodes.
+
+	/*recover my_rank2 and num_procs2*/
+	MPI_Comm_size(comm,&num_procs2);
+	MPI_Comm_rank(comm,&my_rank2);

 	VecGetSize(vector,&vector_size);
 	if(vector_size==0){
…

 	/*Now each node holds its local_vector containing range rows.
 	 * We send this local_vector to the gathered_vector on node 0*/
-	for (i=1;i<num_procs;i++){
-		if (my_rank==i){
-			buffer[0]=my_rank;
+	for (i=1;i<num_procs2;i++){
+		if (my_rank2==i){
+			buffer[0]=my_rank2;
 			buffer[1]=lower_row;
 			buffer[2]=range;
 			MPI_Send(buffer,3,MPI_INT,0,1,comm);
 			if (range)MPI_Send(local_vector,range,MPI_DOUBLE,0,1,comm);
 		}
-		if (my_rank==0){
+		if (my_rank2==0){
 			MPI_Recv(buffer,3,MPI_INT,i,1,comm,&status);
 			if (buffer[2])MPI_Recv(gathered_vector+buffer[1],buffer[2],MPI_DOUBLE,i,1,comm,&status);
 		}
 	}

-	if (my_rank==0){
+	if (my_rank2==0){
 		//Still have the local_vector on node 0 to take care of.
 		if (range)memcpy(gathered_vector+lower_row,local_vector,range*sizeof(double));
 	}
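MatToSerial and VecToMPISerial share the same hand-rolled gather: every rank above 0 sends a 3-int header {rank, lower_row, range} followed by its chunk, and rank 0 copies each chunk to the offset named in the header. Below is a self-contained sketch of that protocol for a plain double array; the function and variable names are illustrative, and the send/receive roles are split into an if/else rather than the single loop used in the actual code.

#include <mpi.h>
#include <cstring>

/* Hypothetical stand-alone version of the gather protocol used by the
 * patched VecToMPISerial: header first, then the payload, received in
 * rank order on process 0. */
void gather_chunks(double* global, double* local, int lower_row, int range, MPI_Comm comm){
   int my_rank2, num_procs2;
   MPI_Comm_rank(comm, &my_rank2);
   MPI_Comm_size(comm, &num_procs2);

   if(my_rank2 != 0){
      int buffer[3] = {my_rank2, lower_row, range};
      MPI_Send(buffer, 3, MPI_INT, 0, 1, comm);
      if(range) MPI_Send(local, range, MPI_DOUBLE, 0, 1, comm);
   }
   else{
      /* rank 0 copies its own chunk directly... */
      if(range) memcpy(global + lower_row, local, range*sizeof(double));
      /* ...then receives every other rank's chunk and places it at the
       * offset announced in that rank's header */
      for(int i = 1; i < num_procs2; i++){
         int buffer[3];
         MPI_Status status;
         MPI_Recv(buffer, 3, MPI_INT, i, 1, comm, &status);
         if(buffer[2]) MPI_Recv(global + buffer[1], buffer[2], MPI_DOUBLE, i, 1, comm, &status);
      }
   }
}

The matrix version in MatToSerial follows the same scheme, except each block is N*range doubles and the receive offset is buffer[1]*N.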
../trunk-jpl/src/c/toolkits/petsc/patches/GetOwnershipBoundariesFromRange.cpp
 void GetOwnershipBoundariesFromRange(int* plower_row,int* pupper_row,int range,COMM comm){

 	/*externals :*/
-	extern int my_rank;
-	extern int num_procs;
+	int my_rank2;
+	int num_procs2;

+	/*recover my_rank2 and num_procs2:*/
+	MPI_Comm_size(comm,&num_procs2);
+	MPI_Comm_rank(comm,&my_rank2);
+
 	/*output: */
 	int lower_row,upper_row;

…
 	int* allranges=NULL;

 	/*Gather all range values into allranges, for all nodes*/
-	allranges=xNew<int>(num_procs);
+	allranges=xNew<int>(num_procs2);
 	MPI_Allgather(&range,1,MPI_INT,allranges,1,MPI_INT,comm);

 	/*From all ranges, get lower row and upper row*/
 	lower_row=0;
 	upper_row=lower_row+allranges[0];
-	for (i=1;i<=my_rank;i++){
+	for (i=1;i<=my_rank2;i++){
 		lower_row=lower_row+allranges[i-1];
 		upper_row=upper_row+allranges[i];
 	}
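GetOwnershipBoundariesFromRange is a prefix sum over the gathered per-rank counts: each rank adds up the ranges of the ranks before it to get its [lower_row, upper_row) window. A hedged sketch of the same computation, using an illustrative std::vector in place of ISSM's xNew allocator:

#include <mpi.h>
#include <vector>

/* Sketch of the ownership computation: every rank contributes its local
 * row count via MPI_Allgather, then prefix-sums the counts of the ranks
 * before it. Function name is illustrative. */
void ownership_from_range(int* plower_row, int* pupper_row, int range, MPI_Comm comm){
   int my_rank2, num_procs2;
   MPI_Comm_rank(comm, &my_rank2);
   MPI_Comm_size(comm, &num_procs2);

   std::vector<int> allranges(num_procs2);
   MPI_Allgather(&range, 1, MPI_INT, allranges.data(), 1, MPI_INT, comm);

   int lower_row = 0;
   for(int i = 0; i < my_rank2; i++) lower_row += allranges[i]; /* rows owned by earlier ranks    */
   int upper_row = lower_row + allranges[my_rank2];             /* one past this rank's last row  */

   *plower_row = lower_row;
   *pupper_row = upper_row;
}

For example, with local ranges {3, 4, 2} on three processes, rank 1 ends up with lower_row = 3 and upper_row = 7.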