source: issm/trunk/src/c/classes/Patch.cpp@15396

/*!\file Patch.cpp
 * \brief: implementation of the Patch object
 */

/*Include files */
#ifdef HAVE_CONFIG_H
   #include <config.h>
#else
#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
#endif

#include <stdio.h>
#include <string.h>
#include <math.h>
#include "./classes.h"
#include "../shared/shared.h"

/*Object constructors and destructors:*/
/*FUNCTION Patch::Patch() default constructor {{{*/
Patch::Patch(){
   this->numrows     = 0;
   this->numcols     = 0;
   this->maxvertices = 0;
   this->maxnodes    = 0;
   this->values      = NULL;
}
/*}}}*/
/*FUNCTION Patch::Patch(int numrows, int maxvertices, int maxnodes){{{*/
Patch::Patch(int in_numrows, int in_maxvertices, int in_maxnodes){

   this->numrows=in_numrows;
   this->maxvertices=in_maxvertices;
   this->maxnodes=in_maxnodes;
   this->numcols=1            //enum_type
                 +1           //step
                 +1           //time
                 +1           //element id
                 +1           //interpolation type
                 +maxvertices //vertices
                 +maxnodes;   //nodes

   //Allocate values and fill with NaN:
   if(this->numcols*this->numrows==0){
      this->values=NULL;
   }
   else{
      this->values=xNew<IssmDouble>(this->numcols*this->numrows);
      for(int i=0;i<this->numrows;i++){
         for(int j=0;j<this->numcols;j++){
            this->values[i*this->numcols+j]=NAN;
         }
      }
   }

}
/*}}}*/
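/*Note added for clarity (not in the original source): with the column layout above,
 * a patch built with maxvertices=3 and maxnodes=3 carries numcols = 5+3+3 = 11
 * IssmDoubles per row, laid out as follows for a hypothetical P1 triangle:
 *
 *    col 0      : enum_type           (which result is stored)
 *    col 1      : step                (time step, cast to IssmDouble)
 *    col 2      : time
 *    col 3      : element id
 *    col 4      : interpolation type
 *    cols 5..7  : vertex ids
 *    cols 8..10 : nodal values
 *
 * Elements with fewer than maxvertices vertices or maxnodes nodes leave their
 * unused slots at the NAN fill value set by the constructor.*/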
/*FUNCTION Patch::~Patch(){{{*/
Patch::~Patch(){
   xDelete<IssmDouble>(values);
}
/*}}}*/

/*Object methods*/
/*FUNCTION Patch::fillelementinfo{{{*/
void Patch::fillelementinfo(int count, int element_id, int* vertices_ids, int num_vertices){

   int i;
   IssmDouble* row=NULL;

   /*point to the start of the row: */
   row=this->values+count*this->numcols;

   /*Let's remember what is on a row:
     enum_type step time element_id interpolation vertices_ids nodal_values
    */
   row[3]=element_id;
   for(i=0;i<num_vertices;i++){
      row[5+i]=vertices_ids[i];
   }

}
/*}}}*/
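/*Illustrative sketch (added commentary, not in the original file): a typical caller
 * that owns the element connectivity would fill the geometry half of row `count`
 * along these lines, for a hypothetical element with id eid and three vertices:
 *
 *    int vertices[3]={12,57,58};
 *    patch->fillelementinfo(count,eid,vertices,3);
 *
 * Only column 3 (element id) and columns 5..5+num_vertices-1 (vertex ids) are
 * written here; the result columns are left to fillresultinfo below.*/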
/*FUNCTION Patch::fillresultinfo{{{*/
void Patch::fillresultinfo(int count,int enum_type,int step,IssmDouble time,int interpolation,IssmDouble* nodal_values,int num_nodes){

   int i;
   IssmDouble* row=NULL;

   /*point to the start of the row: */
   row=this->values+count*this->numcols;

   /*Let's remember what is on a row:
     enum_type step time element_id interpolation vertices_ids nodal_values */
   row[0]=enum_type;
   row[1]=(IssmDouble)step;
   row[2]=time;
   row[4]=interpolation;
   for(i=0;i<num_nodes;i++){
      row[5+this->maxvertices+i]=nodal_values[i];
   }

}
/*}}}*/
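/*Illustrative sketch (added commentary, not in the original file): the matching
 * result call for the same row might look like this, where ResultEnum and
 * InterpEnum stand in for whatever result and interpolation enums the caller uses:
 *
 *    IssmDouble nodal[3]={1.1,2.2,3.3};
 *    patch->fillresultinfo(count,ResultEnum,step,time,InterpEnum,nodal,3);
 *
 * Together with fillelementinfo this completes one row of the patch table.*/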
/*FUNCTION Patch::Gather{{{*/
void Patch::Gather(void){

   int         count;
   int         my_rank;
   int         num_procs;
   int         total_numrows;
   int         node_numrows;
   IssmDouble *total_values = NULL;
   #ifdef _HAVE_MPI_
   MPI_Status  status;
   #endif

   /*recover my_rank:*/
   my_rank=IssmComm::GetRank();
   num_procs=IssmComm::GetSize();

   /*First, figure out the total number of rows combining all the cpus: */
   #ifdef _HAVE_MPI_
   MPI_Reduce(&this->numrows,&total_numrows,1,MPI_INT,MPI_SUM,0,IssmComm::GetComm());
   MPI_Bcast(&total_numrows,1,MPI_INT,0,IssmComm::GetComm());
   #else
   total_numrows=this->numrows;
   #endif

   /*return if patch is empty*/
   if(total_numrows==0) return;

   /*Now, allocate a buffer to hold all the values, on node 0: */
   if(my_rank==0) total_values=xNew<IssmDouble>(this->numcols*total_numrows);

   /*Start by copying node 0 values onto total_values: */
   if(my_rank==0){
      count=0;
      xMemCpy<IssmDouble>(total_values+count,this->values,this->numcols*this->numrows);
      count+=this->numrows*this->numcols;
   }

   /*Now, ask the other nodes to send their values: */
   #ifdef _HAVE_MPI_
   for(int i=1;i<num_procs;i++){
      if(my_rank==i){
         MPI_Send(&this->numrows,1,MPI_INT,0,1,IssmComm::GetComm());
         if(this->numrows) MPI_Send(this->values,this->numrows*this->numcols,MPI_DOUBLE,0,1,IssmComm::GetComm());
      }
      if(my_rank==0){
         MPI_Recv(&node_numrows,1,MPI_INT,i,1,IssmComm::GetComm(),&status);
         if(node_numrows) MPI_Recv(total_values+count,node_numrows*this->numcols,MPI_DOUBLE,i,1,IssmComm::GetComm(),&status);
         count+=node_numrows*this->numcols;
      }
   }
   #endif

   /*Now, node 0 has total_values, of size total_numrows*this->numcols. Update the fields in the patch to reflect this new
    * reality. For the other cpus, there is no point in keeping their data anymore: */
   if(my_rank==0){
      this->numrows=total_numrows;
      xDelete<IssmDouble>(this->values);
      this->values=total_values;
   }
   #ifdef _HAVE_MPI_
   else{
      this->numrows=0;
      xDelete<IssmDouble>(this->values);
   }
   #endif
}/*}}}*/
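/*Note on the communication pattern (added commentary, not in the original source):
 * Gather() concentrates all rows on rank 0 with a simple point-to-point protocol.
 * Each rank i>0 first sends its row count (tag 1), then, if non-empty, its values
 * block; rank 0 appends each block after its own rows, so the final ordering is
 * rank 0's rows followed by rank 1's, rank 2's, and so on. The same result could
 * be obtained with a collective such as MPI_Gatherv, at the cost of first building
 * count and displacement arrays on rank 0; the explicit loop keeps the code simple
 * and only ever holds one extra receive buffer (total_values) in memory.*/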