source: issm/trunk/src/c/classes/Patch.cpp@ 13395

Last change on this file since 13395 was 13395, checked in by Mathieu Morlighem, 12 years ago

merged trunk-jpl and trunk for revision 13393

File size: 4.4 KB
RevLine 
/*!\file Patch.cpp
 * \brief: implementation of the Patch object
 */
4
[12365]5/*Include files: {{{*/
[4039]6#ifdef HAVE_CONFIG_H
[9320]7 #include <config.h>
[4039]8#else
9#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
10#endif
11
[9320]12#include <stdio.h>
[4039]13#include <string.h>
[11399]14#include <math.h>
[12832]15#include "./classes.h"
[4236]16#include "../Container/Container.h"
[4057]17#include "../io/io.h"
[4039]18#include "../EnumDefinitions/EnumDefinitions.h"
19#include "../shared/shared.h"
20#include "../include/include.h"
21/*}}}*/
22
23/*Object constructors and destructors:*/
[12365]24/*FUNCTION Patch::Patch() default constructor {{{*/
[4039]25Patch::Patch(){
26 this->numrows=0;
27 this->numcols=0;
28 this->maxvertices=0;
29 this->maxnodes=0;
[4057]30 this->values=NULL;
[4039]31}
32/*}}}*/
[12365]33/*FUNCTION Patch::Patch(int numrows, int maxvertices, int maxnodes){{{*/
[4039]34Patch::Patch(int in_numrows, int in_maxvertices, int in_maxnodes){
35
[4057]36 int i,j;
[4039]37 this->numrows=in_numrows;
38 this->maxvertices=in_maxvertices;
39 this->maxnodes=in_maxnodes;
40
41 this->numcols=1 //enum_type
42 +1 //step
43 +1 //time
44 +1 //element id
45 +1 //interpolation type
46 +maxvertices //vertices
47 +maxnodes; //nodes
48
[4508]49 //Allocate values and fill with NaN:
50 if (this->numcols*this->numrows==0){
51 this->values=NULL;
52 }
53 else{
[12366]54 this->values=xNew<IssmDouble>(this->numcols*this->numrows);
[4508]55 for(i=0;i<this->numrows;i++){
56 for(j=0;j<this->numcols;j++){
57 this->values[i*this->numcols+j]=NAN;
58 }
[4039]59 }
60 }
61
62}
63/*}}}*/
[12365]64/*FUNCTION Patch::~Patch(){{{*/
[4039]65Patch::~Patch(){
[12366]66 xDelete<IssmDouble>(values);
[4039]67}
68/*}}}*/
[4312]69
70/*Object methods*/
[12365]71/*FUNCTION Patch::fillelementinfo{{{*/
[4042]72void Patch::fillelementinfo(int count, int element_id, int* vertices_ids, int num_vertices){
[4039]73
74 int i;
[12366]75 IssmDouble* row=NULL;
[4039]76
77 /*point to the start of the row: */
78 row=this->values+count*this->numcols;
79
80 /*Let's remember what is on a row:
81 enum_type step time element_id interpolation vertices_ids nodal_values
82 */
83 row[3]=element_id;
84 for(i=0;i<num_vertices;i++){
85 row[5+i]=vertices_ids[i];
86 }
87
88}
89/*}}}*/
[12365]90/*FUNCTION Patch::fillresultinfo{{{*/
[12366]91void Patch::fillresultinfo(int count,int enum_type,int step, IssmDouble time, int interpolation, IssmDouble* nodal_values, int num_nodes){
[4039]92
93 int i;
[12366]94 IssmDouble* row=NULL;
[4039]95
96 /*point to the start of the row: */
97 row=this->values+count*this->numcols;
98
99 /*Let's remember what is on a row:
100 enum_type step time element_id interpolation vertices_ids nodal_values
101 */
102 row[0]=enum_type;
[12366]103 row[1]=(IssmDouble)step;
[4039]104 row[2]=time;
105 row[4]=interpolation;
106 for(i=0;i<num_nodes;i++){
107 row[5+this->maxvertices+i]=nodal_values[i];
108 }
109
110}
111/*}}}*/
/*FUNCTION Patch::Gather{{{*/
/*Collect the rows of every cpu's patch onto rank 0. After the call,
 * rank 0 holds all rows (its own first, then ranks 1..num_procs-1 in
 * order) and every other rank's patch is emptied. Without MPI this is
 * a no-op apart from keeping this cpu's rows.*/
void Patch::Gather(void){

	int i;
	int count;
	extern int my_rank;
	extern int num_procs;
	int total_numrows;
	int node_numrows;
	IssmDouble *total_values = NULL;
	#ifdef _HAVE_MPI_
	MPI_Status status;
	#endif

	/*First, figure out total number of rows combining all the cpus: */
	#ifdef _HAVE_MPI_
	MPI_Reduce(&this->numrows,&total_numrows,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD );
	MPI_Bcast(&total_numrows,1,MPI_INT,0,MPI_COMM_WORLD);
	#else
	total_numrows=this->numrows;
	#endif

	/*return if patch empty*/
	if(total_numrows==0) return;

	/*Now, allocate buffer to holds all the values, on node 0: */
	if(my_rank==0)total_values=xNew<IssmDouble>(this->numcols*total_numrows);

	/*Start by copying node 0 values onto total_values: */
	if(my_rank==0){
		count=0;
		xMemCpy<IssmDouble>(total_values+count,this->values,this->numcols*this->numrows);
		count+=this->numrows*this->numcols;
	}

	/*Now, ask other nodes to send their values: */
	/*Each rank i>0 first sends its row count, then (if non-empty) its
	 * data; rank 0 receives them in rank order, appending at offset
	 * "count". Reusing tag 1 for both messages is safe because MPI
	 * preserves ordering between a given sender/receiver pair.
	 * NOTE(review): MPI_DOUBLE assumes IssmDouble is a plain double —
	 * confirm for builds where IssmDouble is another type.*/
	#ifdef _HAVE_MPI_
	for (i=1;i<num_procs;i++){
		if (my_rank==i){
			MPI_Send(&this->numrows,1,MPI_INT,0,1,MPI_COMM_WORLD);
			if (this->numrows)MPI_Send(this->values,this->numrows*this->numcols,MPI_DOUBLE,0,1,MPI_COMM_WORLD);
		}
		if (my_rank==0){
			MPI_Recv(&node_numrows,1,MPI_INT,i,1,MPI_COMM_WORLD,&status);
			if (node_numrows)MPI_Recv(total_values+count,node_numrows*this->numcols,MPI_DOUBLE,i,1,MPI_COMM_WORLD,&status);
			count+=node_numrows*this->numcols;
		}
	}
	#endif

	/*Now, node 0 has total_values, of size total_numrows*this->numcols. Update the fields in the patch, to reflect this new
	 * reality. For other cpus, no point in keeping their data anymore: */
	if(my_rank==0){
		this->numrows=total_numrows;
		xDelete<IssmDouble>(this->values);
		this->values=total_values;
	}
	#ifdef _HAVE_MPI_
	else{
		this->numrows=0;
		xDelete<IssmDouble>(this->values);
	}
	#endif
}/*}}}*/
Note: See TracBrowser for help on using the repository browser.