source: issm/trunk/src/c/classes/Patch.cpp@13395

Last change on this file since 13395 was 13395, checked in by Mathieu Morlighem, 12 years ago

merged trunk-jpl and trunk for revision 13393

/*!\file Patch.cpp
 * \brief: implementation of the Patch object
 */

/*Include files: {{{*/
#ifdef HAVE_CONFIG_H
   #include <config.h>
#else
#error "Cannot compile without HAVE_CONFIG_H symbol! run configure first!"
#endif

#include <stdio.h>
#include <string.h>
#include <math.h>
#include "./classes.h"
#include "../Container/Container.h"
#include "../io/io.h"
#include "../EnumDefinitions/EnumDefinitions.h"
#include "../shared/shared.h"
#include "../include/include.h"
/*}}}*/

/*Object constructors and destructors:*/
/*FUNCTION Patch::Patch() default constructor {{{*/
Patch::Patch(){
   this->numrows=0;
   this->numcols=0;
   this->maxvertices=0;
   this->maxnodes=0;
   this->values=NULL;
}
/*}}}*/
/*FUNCTION Patch::Patch(int numrows, int maxvertices, int maxnodes){{{*/
Patch::Patch(int in_numrows, int in_maxvertices, int in_maxnodes){

   int i,j;
   this->numrows=in_numrows;
   this->maxvertices=in_maxvertices;
   this->maxnodes=in_maxnodes;

   this->numcols=1    //enum_type
      +1              //step
      +1              //time
      +1              //element id
      +1              //interpolation type
      +maxvertices    //vertices
      +maxnodes;      //nodes

   //Allocate values and fill with NaN:
   if(this->numcols*this->numrows==0){
      this->values=NULL;
   }
   else{
      this->values=xNew<IssmDouble>(this->numcols*this->numrows);
      for(i=0;i<this->numrows;i++){
         for(j=0;j<this->numcols;j++){
            this->values[i*this->numcols+j]=NAN;
         }
      }
   }

}
/*}}}*/
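/*A minimal layout sketch, assuming maxvertices=3 and maxnodes=3 (a
 * hypothetical triangle carrying one nodal value per vertex); numcols then
 * evaluates to 5+3+3=11:
 *
 *    Patch patch(1,3,3);  //one row of 11 columns, all initialized to NaN
 *    //columns:  0         1    2    3          4             5..7         8..10
 *    //content:  enum_type step time element_id interpolation vertices_ids nodal_values
 */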
/*FUNCTION Patch::~Patch(){{{*/
Patch::~Patch(){
   xDelete<IssmDouble>(values);
}
/*}}}*/

/*Object methods*/
/*FUNCTION Patch::fillelementinfo{{{*/
void Patch::fillelementinfo(int count, int element_id, int* vertices_ids, int num_vertices){

   int i;
   IssmDouble* row=NULL;

   /*point to the start of the row: */
   row=this->values+count*this->numcols;

   /*Let's remember what is on a row:
    * enum_type step time element_id interpolation vertices_ids nodal_values
    */
   row[3]=element_id;
   for(i=0;i<num_vertices;i++){
      row[5+i]=vertices_ids[i];
   }

}
/*}}}*/
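/*A usage sketch for fillelementinfo, with made-up ids (row 0, element 12,
 * triangle vertices 4, 7 and 9), continuing the hypothetical patch above:
 *
 *    int vertices_ids[3]={4,7,9};
 *    patch.fillelementinfo(0,12,vertices_ids,3);
 *    //writes row[3]=12 and row[5..7]={4,7,9}; columns 0-2, 4 and the nodal
 *    //columns keep their NaN until fillresultinfo is called on the same row.
 */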
/*FUNCTION Patch::fillresultinfo{{{*/
void Patch::fillresultinfo(int count,int enum_type,int step, IssmDouble time, int interpolation, IssmDouble* nodal_values, int num_nodes){

   int i;
   IssmDouble* row=NULL;

   /*point to the start of the row: */
   row=this->values+count*this->numcols;

   /*Let's remember what is on a row:
    * enum_type step time element_id interpolation vertices_ids nodal_values
    */
   row[0]=enum_type;
   row[1]=(IssmDouble)step;
   row[2]=time;
   row[4]=interpolation;
   for(i=0;i<num_nodes;i++){
      row[5+this->maxvertices+i]=nodal_values[i];
   }

}
/*}}}*/
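/*A companion sketch for fillresultinfo on the same hypothetical row; VelEnum
 * and P1Enum stand in for whatever result and interpolation enums the caller
 * actually uses:
 *
 *    IssmDouble nodal_values[3]={1.1,2.2,3.3};
 *    patch.fillresultinfo(0,VelEnum,1,0.5,P1Enum,nodal_values,3);
 *    //writes row[0]=VelEnum, row[1]=1, row[2]=0.5, row[4]=P1Enum and
 *    //row[8..10]={1.1,2.2,3.3}, the nodal block starting at 5+maxvertices=8.
 */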
/*FUNCTION Patch::Gather{{{*/
void Patch::Gather(void){

   int i;
   int count;
   extern int my_rank;
   extern int num_procs;
   int total_numrows;
   int node_numrows;
   IssmDouble *total_values = NULL;
   #ifdef _HAVE_MPI_
   MPI_Status status;
   #endif

   /*First, figure out total number of rows combining all the cpus: */
   #ifdef _HAVE_MPI_
   MPI_Reduce(&this->numrows,&total_numrows,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD);
   MPI_Bcast(&total_numrows,1,MPI_INT,0,MPI_COMM_WORLD);
   #else
   total_numrows=this->numrows;
   #endif

   /*return if patch empty*/
   if(total_numrows==0) return;

   /*Now, allocate buffer to hold all the values, on node 0: */
   if(my_rank==0)total_values=xNew<IssmDouble>(this->numcols*total_numrows);

   /*Start by copying node 0 values onto total_values: */
   if(my_rank==0){
      count=0;
      xMemCpy<IssmDouble>(total_values+count,this->values,this->numcols*this->numrows);
      count+=this->numrows*this->numcols;
   }

   /*Now, ask the other nodes to send their values: */
   #ifdef _HAVE_MPI_
   for(i=1;i<num_procs;i++){
      if(my_rank==i){
         MPI_Send(&this->numrows,1,MPI_INT,0,1,MPI_COMM_WORLD);
         if(this->numrows)MPI_Send(this->values,this->numrows*this->numcols,MPI_DOUBLE,0,1,MPI_COMM_WORLD);
      }
      if(my_rank==0){
         MPI_Recv(&node_numrows,1,MPI_INT,i,1,MPI_COMM_WORLD,&status);
         if(node_numrows)MPI_Recv(total_values+count,node_numrows*this->numcols,MPI_DOUBLE,i,1,MPI_COMM_WORLD,&status);
         count+=node_numrows*this->numcols;
      }
   }
   #endif

   /*Now, node 0 has total_values, of size total_numrows*this->numcols. Update the fields in the patch
    * to reflect this new reality. For the other cpus, there is no point in keeping their data anymore: */
   if(my_rank==0){
      this->numrows=total_numrows;
      xDelete<IssmDouble>(this->values);
      this->values=total_values;
   }
   #ifdef _HAVE_MPI_
   else{
      this->numrows=0;
      xDelete<IssmDouble>(this->values);
   }
   #endif
}/*}}}*/
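/*A post-condition sketch for Gather, assuming two cpus holding 3 and 5 rows
 * respectively (hypothetical numbers):
 *
 *    patch.Gather();
 *    //rank 0: numrows==8 and values now points to the 8*numcols gathered buffer
 *    //rank 1: numrows==0 and its local values buffer has been freed
 */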