source: issm/trunk/src/c/classes/Patch.cpp@ 13975

Last change on this file since 13975 was 13975, checked in by Mathieu Morlighem, 12 years ago

merged trunk-jpl and trunk for revision 13974

File size: 4.5 KB
Line 
/*!\file Patch.cpp
2 * \brief: implementation of the Patch object
3 */
4
5/*Include files */
6#ifdef HAVE_CONFIG_H
7 #include <config.h>
8#else
9#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
10#endif
11
12#include <stdio.h>
13#include <string.h>
14#include <math.h>
15#include "./classes.h"
16#include "../Container/Container.h"
17#include "../io/io.h"
18#include "../EnumDefinitions/EnumDefinitions.h"
19#include "../shared/shared.h"
20#include "../include/include.h"
21
22/*Object constructors and destructors:*/
23/*FUNCTION Patch::Patch() default constructor {{{*/
24Patch::Patch(){
25 this->numrows = 0;
26 this->numcols = 0;
27 this->maxvertices = 0;
28 this->maxnodes = 0;
29 this->values = NULL;
30}
31/*}}}*/
32/*FUNCTION Patch::Patch(int numrows, int maxvertices, int maxnodes){{{*/
33Patch::Patch(int in_numrows, int in_maxvertices, int in_maxnodes){
34
35 this->numrows=in_numrows;
36 this->maxvertices=in_maxvertices;
37 this->maxnodes=in_maxnodes;
38 this->numcols=1 //enum_type
39 +1 //step
40 +1 //time
41 +1 //element id
42 +1 //interpolation type
43 +maxvertices //vertices
44 +maxnodes; //nodes
45
46 //Allocate values and fill with NaN:
47 if (this->numcols*this->numrows==0){
48 this->values=NULL;
49 }
50 else{
51 this->values=xNew<IssmDouble>(this->numcols*this->numrows);
52 for(int i=0;i<this->numrows;i++){
53 for(int j=0;j<this->numcols;j++){
54 this->values[i*this->numcols+j]=NAN;
55 }
56 }
57 }
58
59}
60/*}}}*/
61/*FUNCTION Patch::~Patch(){{{*/
62Patch::~Patch(){
63 xDelete<IssmDouble>(values);
64}
65/*}}}*/
66
67/*Object methods*/
68/*FUNCTION Patch::fillelementinfo{{{*/
69void Patch::fillelementinfo(int count, int element_id, int* vertices_ids, int num_vertices){
70
71 int i;
72 IssmDouble* row=NULL;
73
74 /*point to the start of the row: */
75 row=this->values+count*this->numcols;
76
77 /*Let's remember what is on a row:
78 enum_type step time element_id interpolation vertices_ids nodal_values
79 */
80 row[3]=element_id;
81 for(i=0;i<num_vertices;i++){
82 row[5+i]=vertices_ids[i];
83 }
84
85}
86/*}}}*/
87/*FUNCTION Patch::fillresultinfo{{{*/
88void Patch::fillresultinfo(int count,int enum_type,int step, IssmDouble time, int interpolation, IssmDouble* nodal_values, int num_nodes){
89
90 int i;
91 IssmDouble* row=NULL;
92
93 /*point to the start of the row: */
94 row=this->values+count*this->numcols;
95
96 /*Let's remember what is on a row:
97 enum_type step time element_id interpolation vertices_ids nodal_values */
98 row[0]=enum_type;
99 row[1]=(IssmDouble)step;
100 row[2]=time;
101 row[4]=interpolation;
102 for(i=0;i<num_nodes;i++){
103 row[5+this->maxvertices+i]=nodal_values[i];
104 }
105
106}
107/*}}}*/
/*FUNCTION Patch::Gather{{{*/
/*Gather all patch rows from every cpu onto rank 0. After this call, rank 0 holds
 *the concatenation of all per-cpu rows in this->values (numrows updated to the
 *global total) while every other rank has its patch emptied (numrows=0, values freed).
 *Without MPI this is a no-op apart from the total-row bookkeeping.*/
void Patch::Gather(void){

	int count;
	int my_rank;
	int num_procs;
	int total_numrows;
	int node_numrows;
	IssmDouble *total_values = NULL;
	#ifdef _HAVE_MPI_
	MPI_Status status;
	#endif

	/*recover my_rank:*/
	my_rank=IssmComm::GetRank();
	num_procs=IssmComm::GetSize();

	/*First, figure out total number of rows combining all the cpus: */
	#ifdef _HAVE_MPI_
	/*Reduce onto rank 0, then broadcast so every rank knows the total (needed for the
	 *early return below to be taken collectively)*/
	MPI_Reduce(&this->numrows,&total_numrows,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm() );
	MPI_Bcast(&total_numrows,1,MPI_INT,0,IssmComm::GetComm());
	#else
	total_numrows=this->numrows;
	#endif

	/*return if patch empty*/
	if(total_numrows==0) return;

	/*Now, allocate buffer to holds all the values, on node 0: */
	if(my_rank==0)total_values=xNew<IssmDouble>(this->numcols*total_numrows);

	/*Start by copying node 0 values onto total_values: */
	if(my_rank==0){
		/*count tracks the write offset (in IssmDoubles) into total_values; only rank 0 uses it*/
		count=0;
		xMemCpy<IssmDouble>(total_values+count,this->values,this->numcols*this->numrows);
		count+=this->numrows*this->numcols;
	}

	/*Now, ask other nodes to send their values: */
	#ifdef _HAVE_MPI_
	/*Ranks are drained one at a time, in order, so rows land in total_values sorted by rank.
	 *Each rank first sends its row count (tag 1), then its data only if non-empty.*/
	for(int i=1;i<num_procs;i++){
		if (my_rank==i){
			MPI_Send(&this->numrows,1,MPI_INT,0,1,IssmComm::GetComm());
			if (this->numrows)MPI_Send(this->values,this->numrows*this->numcols,MPI_DOUBLE,0,1,IssmComm::GetComm());
		}
		if (my_rank==0){
			MPI_Recv(&node_numrows,1,MPI_INT,i,1,IssmComm::GetComm(),&status);
			if (node_numrows)MPI_Recv(total_values+count,node_numrows*this->numcols,MPI_DOUBLE,i,1,IssmComm::GetComm(),&status);
			count+=node_numrows*this->numcols;
		}
	}
	#endif

	/*Now, node 0 has total_values, of size total_numrows*this->numcols. Update the fields in the patch, to reflect this new
	 * reality. For other cpus, no point in keeping their data anymore: */
	if(my_rank==0){
		this->numrows=total_numrows;
		xDelete<IssmDouble>(this->values);
		this->values=total_values;
	}
	#ifdef _HAVE_MPI_
	else{
		/*Non-root ranks give up their local copy entirely*/
		this->numrows=0;
		xDelete<IssmDouble>(this->values);
	}
	#endif
}/*}}}*/
Note: See TracBrowser for help on using the repository browser.