/*
 * \file Elements.c
 * \brief: implementation of the Elements class, derived from DataSet class
 */

/*Headers: {{{1*/
#ifdef HAVE_CONFIG_H
	#include <config.h>
#else
#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
#endif

#include <vector>
#include <algorithm>
#include <iostream>

#include "./DataSet.h"
#include "../shared/shared.h"
#include "../include/include.h"
#include "../EnumDefinitions/EnumDefinitions.h"

using namespace std;
/*}}}*/

/*Object constructors and destructor*/
/*FUNCTION Elements::Elements(){{{1*/
Elements::Elements(){
	enum_type=MeshElementsEnum;
	return;
}
/*}}}*/
/*FUNCTION Elements::~Elements(){{{1*/
Elements::~Elements(){
	return;
}
/*}}}*/

/*Object management*/
/*FUNCTION Elements::Configure{{{1*/
void Elements::Configure(Elements* elements,Loads* loads,Nodes* nodes,Vertices* vertices,Materials* materials,Parameters* parameters){

	vector<Object*>::iterator object;
	Element* element=NULL;

	for(object=objects.begin();object<objects.end();object++){
		element=(Element*)(*object);
		element->Configure(elements,loads,nodes,materials,parameters);
	}
}
/*}}}*/
/*FUNCTION Elements::ProcessResultsUnits{{{1*/
void Elements::ProcessResultsUnits(void){

	//Process results to be output in the correct units
	for(int i=0;i<this->Size();i++){
		Element* element=(Element*)this->GetObjectByOffset(i);
		element->ProcessResultsUnits();
	}
}
/*}}}*/
/*FUNCTION Elements::DeleteResults{{{1*/
void Elements::DeleteResults(void){

	for(int i=0;i<this->Size();i++){
		Element* element=(Element*)this->GetObjectByOffset(i);
		element->DeleteResults();
	}
}
/*}}}*/
/*FUNCTION Elements::ResultsToPatch{{{1*/
Patch* Elements::ResultsToPatch(void){

	/*output: */
	Patch* patch=NULL;

	/*intermediary: */
	int i;
	int count;
	int numrows;
	int numvertices;
	int numnodes;
	int max_numvertices;
	int max_numnodes;
	int element_numvertices;
	int element_numrows;
	int element_numnodes;

	/*We are going to extract the desired results from within the elements, and create a table
	 * of patch information that will hold, for each element that computed the result we
	 * desire: the enum_type of the result, the step and time, the id of the element, the
	 * interpolation type, the vertices ids, and the values at the nodes (which could be
	 * different from the vertices). This will be used for visualization purposes.
	 * For example, we could build the following patch table for velocities:
	 *
	 * 1. on a Beam element, Vx, at step 1, time .5, element id 1, interpolation type P0
	 *    (constant), vertices ids 1 and 2, one constant value 4.5:
	 *
	 *    VxEnum 1 .5 1 P0 1 2 4.5 NaN NaN
	 *
	 * 2. on a Tria element, Vz, at step 2, time .8, element id 2, interpolation type P1
	 *    (linear), vertices ids 1, 3 and 4, with values at 3 nodes 4.5, 3.2, 2.5:
	 *
	 *    VzEnum 2 .8 2 P1 1 3 4 4.5 3.2 2.5
	 *
	 * ... etc ...
	 *
	 * To build the table we need the maximum number of vertices included in the table,
	 * the maximum number of nodal values, and the number of rows. Once we have that,
	 * we ask the elements to fill their own rows in the table, by looping on the elements.
	 *
	 * We will use the Patch object, which will store all of the information needed, and is
	 * able to output itself to disk on its own. See object/Patch.h for the format of this
	 * object.*/

	/*First, determine maximum number of vertices, nodes, and number of results: */
	numrows=0; numvertices=0; numnodes=0;
	for(i=0;i<this->Size();i++){
		Element* element=(Element*)this->GetObjectByOffset(i);
		element->PatchSize(&element_numrows,&element_numvertices,&element_numnodes);
		numrows+=element_numrows;
		if(element_numvertices>numvertices)numvertices=element_numvertices;
		if(element_numnodes>numnodes)numnodes=element_numnodes;
	}

	#ifdef _PARALLEL_
	/*Synchronize across cluster, so as to not end up with different sizes for each patch on each cpu: */
	MPI_Reduce(&numvertices,&max_numvertices,1,MPI_INT,MPI_MAX,0,MPI_COMM_WORLD);
	MPI_Bcast(&max_numvertices,1,MPI_INT,0,MPI_COMM_WORLD);
	numvertices=max_numvertices;

	MPI_Reduce(&numnodes,&max_numnodes,1,MPI_INT,MPI_MAX,0,MPI_COMM_WORLD);
	MPI_Bcast(&max_numnodes,1,MPI_INT,0,MPI_COMM_WORLD);
	numnodes=max_numnodes;
	#endif

	/*Ok, initialize Patch object: */
	patch=new Patch(numrows,numvertices,numnodes);

	/*Now, go through elements, and fill the Patch object: */
	count=0;
	for(i=0;i<this->Size();i++){
		Element* element=(Element*)this->GetObjectByOffset(i);
		element->PatchFill(&count,patch);
	}

	return patch;
}
/*}}}*/
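/*Note on the synchronization above: the MPI_Reduce + MPI_Bcast pair computes a cluster-wide
 * maximum on cpu 0 and hands it back to every cpu. A minimal sketch of the same exchange,
 * assuming MPI is available (the helper name is hypothetical, for illustration only):
 * a single MPI_Allreduce with MPI_MAX collapses the two collective calls into one.*/
#ifdef _PARALLEL_
static int SynchronizedMax(int local_value){
	int global_max;
	/*every cpu contributes local_value; every cpu receives the maximum*/
	MPI_Allreduce(&local_value,&global_max,1,MPI_INT,MPI_MAX,MPI_COMM_WORLD);
	return global_max;
}
#endif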
/*FUNCTION Elements::SetCurrentConfiguration{{{1*/
void Elements::SetCurrentConfiguration(Elements* elements,Loads* loads,Nodes* nodes,Vertices* vertices,Materials* materials,Parameters* parameters){

	vector<Object*>::iterator object;
	Element* element=NULL;

	for(object=objects.begin();object<objects.end();object++){
		element=(Element*)(*object);
		element->SetCurrentConfiguration(elements,loads,nodes,materials,parameters);
	}
}
/*}}}*/
/*FUNCTION Elements::ToResults{{{1*/
void Elements::ToResults(Results* results,Parameters* parameters){

	extern int my_rank;
	extern int num_procs;

	Patch   *patch         = NULL;
	int     *resultsenums  = NULL;
	int     *resultssizes  = NULL;
	int     *resultssteps  = NULL;
	double  *resultstimes  = NULL;
	double  *vector_serial = NULL;
	Vector  *vector        = NULL;
	bool     io_gather;
	bool     results_as_patches;
	int      numberofvertices,numberofelements;
	int      numberofresults,vectorsize;
	int      rank;
	int      minrank;

	/*Recover parameters: */
	parameters->FindParam(&io_gather,SettingsIoGatherEnum);
	parameters->FindParam(&results_as_patches,SettingsResultsAsPatchesEnum);
	parameters->FindParam(&numberofvertices,MeshNumberofverticesEnum);
	parameters->FindParam(&numberofelements,MeshNumberofelementsEnum);

	if(!results_as_patches){
		/*No patch here, we prepare vectors*/

		/*Get rank of first cpu that has results*/
		if(this->Size()) rank=my_rank;
		else rank=num_procs;
		MPI_Allreduce(&rank,&minrank,1,MPI_INT,MPI_MIN,MPI_COMM_WORLD);

		/*see what the first element of this partition has in stock (this is common to all partitions)*/
		if(my_rank==minrank){
			if(this->Size()==0) _error_("Cannot write results because there is no element??");
			Element* element=(Element*)this->GetObjectByOffset(0);
			element->ListResultsInfo(&resultsenums,&resultssizes,&resultstimes,&resultssteps,&numberofresults);
		}
		MPI_Bcast(&numberofresults,1,MPI_INT,minrank,MPI_COMM_WORLD);

		/*Get out if there are no results. Otherwise broadcast info*/
		if(!numberofresults) return;
		if(my_rank!=minrank){
			resultsenums=(int*)xmalloc(numberofresults*sizeof(int));
			resultssizes=(int*)xmalloc(numberofresults*sizeof(int));
			resultstimes=(double*)xmalloc(numberofresults*sizeof(double));
			resultssteps=(int*)xmalloc(numberofresults*sizeof(int));
		}
		MPI_Bcast(resultsenums,numberofresults,MPI_INT,minrank,MPI_COMM_WORLD);
		MPI_Bcast(resultssizes,numberofresults,MPI_INT,minrank,MPI_COMM_WORLD);
		MPI_Bcast(resultstimes,numberofresults,MPI_DOUBLE,minrank,MPI_COMM_WORLD);
		MPI_Bcast(resultssteps,numberofresults,MPI_INT,minrank,MPI_COMM_WORLD);

		/*Loop over all results and get nodal vector*/
		for(int i=0;i<numberofresults;i++){

			/*Prepare vector: vertex-based (P1) results hold one value per vertex, element-based results one value per element*/
			if(resultssizes[i]==P1Enum) vectorsize=numberofvertices;
			else vectorsize=numberofelements;
			vector=new Vector(vectorsize);

			/*Fill in vector*/
			for(int j=0;j<this->Size();j++){
				Element* element=(Element*)this->GetObjectByOffset(j);
				element->GetVectorFromResults(vector,i,resultssizes[i]);
			}
			vector->Assemble();

			/*Serialize and add to results*/
			vector_serial=vector->ToMPISerial();
			if(my_rank==0){
				/*No need to add this vector for all cpus*/
				results->AddObject(new DoubleVecExternalResult(results->Size()+1,resultsenums[i],vector_serial,vectorsize,resultssteps[i],resultstimes[i]));
			}

			/*clean up*/
			xdelete(&vector);
			xfree((void**)&vector_serial);
		}
	}
	else{
		/*create patch object out of all results in this dataset: */
		patch=this->ResultsToPatch();

		/*Gather onto master cpu 0, if needed: */
		#ifdef _PARALLEL_
		if(io_gather)patch->Gather();
		#endif

		/*create result object and add to results dataset:*/
		if(patch->maxvertices && patch->maxnodes){
			results->AddObject(new IntExternalResult(results->Size()+1,PatchVerticesEnum,patch->maxvertices,1,0));
			results->AddObject(new IntExternalResult(results->Size()+1,PatchNodesEnum,patch->maxnodes,1,0));
			results->AddObject(new DoubleMatExternalResult(results->Size()+1,PatchEnum,patch->values,patch->numrows,patch->numcols,1,0));
		}
	}

	/*Free resources:*/
	xfree((void**)&resultsenums);
	xfree((void**)&resultssizes);
	xfree((void**)&resultstimes);
	xfree((void**)&resultssteps);
	delete patch;
}
/*}}}*/
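/*ToResults elects the lowest-numbered cpu that owns elements (cpus without elements bid
 * num_procs, which always loses the MPI_MIN reduction) and then broadcasts the result metadata
 * from that cpu. A minimal sketch of the election step, assuming MPI is available (the helper
 * name is hypothetical, for illustration only):*/
#ifdef _PARALLEL_
static int FirstRankWithElements(int local_size,int my_rank,int num_procs){
	int rank,minrank;
	/*a cpu with no elements bids num_procs, higher than any real rank*/
	if(local_size) rank=my_rank;
	else rank=num_procs;
	MPI_Allreduce(&rank,&minrank,1,MPI_INT,MPI_MIN,MPI_COMM_WORLD);
	return minrank;
}
#endif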
/*FUNCTION Elements::NumberOfElements{{{1*/
int Elements::NumberOfElements(void){

	int local_nelem=0;
	int numberofelements;

	#ifdef _PARALLEL_
	local_nelem=this->Size();
	MPI_Allreduce((void*)&local_nelem,(void*)&numberofelements,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
	#else
	numberofelements=this->Size();
	#endif

	return numberofelements;
}
/*}}}*/
/*FUNCTION Elements::InputDuplicate{{{1*/
void Elements::InputDuplicate(int input_enum,int output_enum){

	for(int i=0;i<this->Size();i++){
		Element* element=(Element*)this->GetObjectByOffset(i);
		element->InputDuplicate(input_enum,output_enum);
	}
}
/*}}}*/
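/*Usage sketch (hypothetical driver code, for illustration only; the actual calling sequence
 * lives in the solution drivers): a typical run configures the elements against the other
 * datasets, selects the analysis configuration, then harvests results once the solve is done:
 *
 *    elements->Configure(elements,loads,nodes,vertices,materials,parameters);
 *    elements->SetCurrentConfiguration(elements,loads,nodes,vertices,materials,parameters);
 *    //...solve...
 *    elements->ProcessResultsUnits();
 *    elements->ToResults(results,parameters);
 *    elements->DeleteResults();
 */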