source: issm/trunk/src/c/Container/Elements.cpp@ 11027

Last change on this file since 11027 was 11027, checked in by Eric.Larour, 13 years ago

merged trunk-jpl and trunk for revision 11026

File size: 9.2 KB
Line 
1/*
2 * \file Elements.c
3 * \brief: implementation of the Elements class, derived from DataSet class
4 */
5
6/*Headers: {{{1*/
7#ifdef HAVE_CONFIG_H
8 #include <config.h>
9#else
10#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
11#endif
12
13#include <vector>
14#include <functional>
15#include <algorithm>
16#include <iostream>
17
18#include "./DataSet.h"
19#include "../shared/shared.h"
20#include "../include/include.h"
21#include "../EnumDefinitions/EnumDefinitions.h"
22
23using namespace std;
24/*}}}*/
25
26/*Object constructors and destructor*/
27/*FUNCTION Elements::Elements(){{{1*/
28Elements::Elements(){
29 enum_type=MeshElementsEnum;
30 return;
31}
32/*}}}*/
33/*FUNCTION Elements::~Elements(){{{1*/
34Elements::~Elements(){
35 return;
36}
37/*}}}*/
38
39/*Object management*/
40/*FUNCTION Elements::Configure{{{1*/
41void Elements::Configure(Elements* elements,Loads* loads, Nodes* nodes, Vertices* vertices, Materials* materials,Parameters* parameters){
42
43 vector<Object*>::iterator object;
44 Element* element=NULL;
45
46 for ( object=objects.begin() ; object < objects.end(); object++ ){
47
48 element=(Element*)(*object);
49 element->Configure(elements,loads,nodes,materials,parameters);
50
51 }
52
53}
54/*}}}*/
55/*FUNCTION Elements::ProcessResultsUnits{{{1*/
56void Elements::ProcessResultsUnits(void){
57
58 //Process results to be output in the correct units
59 for(int i=0;i<this->Size();i++){
60 Element* element=(Element*)this->GetObjectByOffset(i);
61 element->ProcessResultsUnits();
62 }
63}
64/*}}}*/
65/*FUNCTION Elements::DeleteResults{{{1*/
66void Elements::DeleteResults(void){
67
68 for (int i=0;i<this->Size();i++){
69 Element* element=(Element*)this->GetObjectByOffset(i);
70 element->DeleteResults();
71 }
72}
73/*}}}*/
/*FUNCTION Elements::ResultsToPatch{{{1*/
Patch* Elements::ResultsToPatch(void){

	/*output: */
	Patch* patch=NULL;

	/*intermediary: */
	int i;
	int count;
	int numrows;
	int numvertices;
	int numnodes;
	int max_numvertices;
	int max_numnodes;
	int element_numvertices;
	int element_numrows;
	int element_numnodes;

	/*We are going to extract from the results within the elements, the desired results, and create a table
	 * of patch information, that will hold, for each element that computed the result that
	 * we desire, the enum_type of the result, the step and time, the id of the element, the interpolation type, the vertices ids, and the values
	 * at the nodes (could be different from the vertices). This will be used for visualization purposes.
	 * For example, we could build the following patch table, for velocities:
	 *
	 1. on a Beam element, Vx, at step 1, time .5, element id 1, interpolation type P0 (constant), vertices ids 1 and 2, one constant value 4.5
	 VxEnum 1 .5 1 P0 1 2 4.5 NaN NaN
	 2. on a Tria element, Vz, at step 2, time .8, element id 2, interpolation type P1 (linear), vertices ids 1 3 and 4, with values at 3 nodes 4.5, 3.2, 2.5
	 VzEnum 2 .8 2 P1 1 3 4 4.5 3.2 2.5
	 * ... etc ...
	 *
	 * So what do we need to build the table: the maximum number of vertices included in the table,
	 * and the maximum number of nodal values, as well as the number of rows. Once we have that,
	 * we ask the elements to fill their own row in the table, by looping on the elements.
	 *
	 * We will use the Patch object, which will store all of the information needed, and will be able
	 * to output itself to disk on its own. See object/Patch.h for format of this object.*/

	/*First, determine maximum number of vertices, nodes, and number of result rows
	 * over the elements owned by this partition: */
	numrows=0;
	numvertices=0;
	numnodes=0;

	for(i=0;i<this->Size();i++){

		Element* element=(Element*)this->GetObjectByOffset(i);
		element->PatchSize(&element_numrows,&element_numvertices,&element_numnodes);

		/*rows accumulate; vertex/node counts are per-row maxima*/
		numrows+=element_numrows;
		if(element_numvertices>numvertices)numvertices=element_numvertices;
		if(element_numnodes>numnodes)numnodes=element_numnodes;
	}

	#ifdef _PARALLEL_
	/*Synchronize across cluster, so as to not end up with different sizes for each patch on each cpu:
	 * reduce the maxima onto rank 0, then broadcast them back to everyone.
	 * NOTE(review): an MPI_Allreduce would achieve the same in one call.*/
	MPI_Reduce (&numvertices,&max_numvertices,1,MPI_INT,MPI_MAX,0,MPI_COMM_WORLD );
	MPI_Bcast(&max_numvertices,1,MPI_INT,0,MPI_COMM_WORLD);
	numvertices=max_numvertices;

	MPI_Reduce (&numnodes,&max_numnodes,1,MPI_INT,MPI_MAX,0,MPI_COMM_WORLD );
	MPI_Bcast(&max_numnodes,1,MPI_INT,0,MPI_COMM_WORLD);
	numnodes=max_numnodes;
	#endif

	/*Ok, initialize Patch object sized for this partition's rows and the global column maxima: */
	patch=new Patch(numrows,numvertices,numnodes);

	/*Now, go through elements, and fill the Patch object; count tracks the next free row: */
	count=0;
	for(i=0;i<this->Size();i++){
		Element* element=(Element*)this->GetObjectByOffset(i);
		element->PatchFill(&count,patch);
	}

	return patch;
}
/*}}}*/
150/*FUNCTION Elements::SetCurrentConfiguration{{{1*/
151void Elements::SetCurrentConfiguration(Elements* elements,Loads* loads, Nodes* nodes, Vertices* vertices, Materials* materials,Parameters* parameters){
152
153 vector<Object*>::iterator object;
154 Element* element=NULL;
155
156 for ( object=objects.begin() ; object < objects.end(); object++ ){
157
158 element=(Element*)(*object);
159 element->SetCurrentConfiguration(elements,loads,nodes,materials,parameters);
160
161 }
162
163}
164/*}}}*/
165/*FUNCTION Elements::ToResults{{{1*/
166void Elements::ToResults(Results* results,Parameters* parameters){
167
168 extern int my_rank;
169
170 Patch *patch = NULL;
171 int *resultsenums = NULL;
172 int *resultssizes = NULL;
173 int *resultssteps = NULL;
174 double *resultstimes = NULL;
175 double *vector_serial= NULL;
176 Vec vector = NULL;
177 bool io_gather;
178 bool results_as_patches;
179 int numberofvertices,numberofelements;
180 int numberofresults,vectorsize;
181
182 /*Recover parameters: */
183 parameters->FindParam(&io_gather,SettingsIoGatherEnum);
184 parameters->FindParam(&results_as_patches,SettingsResultsAsPatchesEnum);
185 parameters->FindParam(&numberofvertices,MeshNumberofverticesEnum);
186 parameters->FindParam(&numberofelements,MeshNumberofelementsEnum);
187
188 if(!results_as_patches){
189 /*No patch here, we prepare vectors*/
190
191 /*see what the first element of this partition has in stock (this is common to all partitions)*/
192 if(my_rank==0){
193 if(this->Size()==0) _error_("Cannot write results because first partition has no element. Maybe too many cpus were requested");
194 Element* element=(Element*)this->GetObjectByOffset(0);
195 element->ListResultsInfo(&resultsenums,&resultssizes,&resultstimes,&resultssteps,&numberofresults);
196 }
197 MPI_Bcast(&numberofresults,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
198
199 /*Get out if there is no results. Otherwise broadcast info*/
200 if(!numberofresults) return;
201 if(my_rank!=0){
202 resultsenums=(int*)xmalloc(numberofresults*sizeof(int));
203 resultssizes=(int*)xmalloc(numberofresults*sizeof(int));
204 resultstimes=(double*)xmalloc(numberofresults*sizeof(double));
205 resultssteps=(int*)xmalloc(numberofresults*sizeof(int));
206 }
207 MPI_Bcast(resultsenums,numberofresults,MPI_INT,0,MPI_COMM_WORLD);
208 MPI_Bcast(resultssizes,numberofresults,MPI_INT,0,MPI_COMM_WORLD);
209 MPI_Bcast(resultstimes,numberofresults,MPI_DOUBLE,0,MPI_COMM_WORLD);
210 MPI_Bcast(resultssteps,numberofresults,MPI_INT,0,MPI_COMM_WORLD);
211
212 /*Loop over all results and get nodal vector*/
213 for(int i=0;i<numberofresults;i++){
214
215 /*Get vector for result number i*/
216 if(resultssizes[i]==P1Enum) vectorsize=numberofvertices;
217 else if(resultssizes[i]==P0Enum) vectorsize=numberofelements;
218 else _error_("Unkown result size: %s",EnumToStringx(resultssizes[i]));
219 vector=NewVec(vectorsize);
220
221 for(int j=0;j<this->Size();j++){
222 Element* element=(Element*)this->GetObjectByOffset(j);
223 element->GetVectorFromResults(vector,i,resultssizes[i]);
224 }
225 VecAssemblyBegin(vector);
226 VecAssemblyEnd(vector);
227
228 /*Serialize and add to results*/
229 VecToMPISerial(&vector_serial,vector);
230 if(my_rank==0){
231 /*No need to add this vector for all cpus*/
232 results->AddObject(new DoubleVecExternalResult(results->Size()+1,resultsenums[i],vector_serial,vectorsize,resultssteps[i],resultstimes[i]));
233 }
234
235 /*clean up*/
236 VecFree(&vector);
237 xfree((void**)&vector_serial);
238 }
239 }
240 else{
241 /*create patch object out of all results in this dataset: */
242 patch=this->ResultsToPatch();
243
244 /*Gather onto master cpu 0, if needed: */
245#ifdef _PARALLEL_
246 if(io_gather)patch->Gather();
247#endif
248
249 /*create result object and add to results dataset:*/
250 if (patch->maxvertices && patch->maxnodes){
251 results->AddObject(new IntExternalResult(results->Size()+1,PatchVerticesEnum,patch->maxvertices,1,0));
252 results->AddObject(new IntExternalResult(results->Size()+1,PatchNodesEnum, patch->maxnodes,1,0));
253 results->AddObject(new DoubleMatExternalResult(results->Size()+1,PatchEnum, patch->values,patch->numrows,patch->numcols,1,0));
254 }
255 }
256
257 /*Free ressources:*/
258 xfree((void**)&resultsenums);
259 xfree((void**)&resultssizes);
260 xfree((void**)&resultstimes);
261 xfree((void**)&resultssteps);
262 delete patch;
263}
264/*}}}*/
265/*FUNCTION Elements::NumberOfElements{{{1*/
266int Elements::NumberOfElements(void){
267
268 int local_nelem=0;
269 int numberofelements;
270
271 #ifdef _PARALLEL_
272 local_nelem=this->Size();
273 MPI_Allreduce ( (void*)&local_nelem,(void*)&numberofelements,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
274 #else
275 numberofelements=this->Size();
276 #endif
277
278 return numberofelements;
279}
280/*}}}*/
281/*FUNCTION Elements::InputCopy{{{1*/
282void Elements::InputDuplicate(int input_enum,int output_enum){
283
284 for(int i=0;i<this->Size();i++){
285 Element* element=(Element*)this->GetObjectByOffset(i);
286 element->InputDuplicate(input_enum,output_enum);
287 }
288}
289/*}}}*/
Note: See TracBrowser for help on using the repository browser.