Changeset 25761
- Timestamp: 11/16/20 14:06:39
- File: 1 edited
Legend:
- Unmodified lines are shown with a leading space
- Added lines (new in r25761) are prefixed with "+"
- Removed lines (present only in r25539) are prefixed with "-"
- Hunks whose only change is the removal of trailing whitespace are shown once, as unmodified context
issm/trunk-jpl/src/c/modules/InputUpdateFromDakotax/InputUpdateFromDakotax.cpp
Diff r25539 -> r25761:

  #include "../InputUpdateFromConstantx/InputUpdateFromConstantx.h"
  #include "../InputUpdateFromVectorDakotax/InputUpdateFromVectorDakotax.h"

  void InputUpdateFromDakotax(FemModel* femmodel,double* variables,char* *variables_descriptors,int numdakotavariables){ /*{{{*/

…

     /*retrieve parameters: */
     femmodel->parameters->FindParam(&variable_partitions,&variable_partitions_num,NULL,NULL,QmuVariablePartitionsEnum);
     femmodel->parameters->FindParam(&variable_partitions_npart,NULL,NULL,QmuVariablePartitionsNpartEnum);
     femmodel->parameters->FindParam(&variable_partitions_nt,NULL,NULL,QmuVariablePartitionsNtEnum);


     /*Go through all dakota descriptors, ex: "rho_ice","thermal_conductivity","thickness1","thickness2", etc ..., and
      * for each descriptor, take the variable value and plug it into the inputs (more or less :)):
      * We also start with distributed and standard values, as they tend to be used to pluck data from a multi-model ensemble (mme)
      * which can then be scaled. Doing the scaling first would be impractical, as the entire mme would have to be scaled,
      * which is a waste of time:*/

     variablecount=0;
     for(i=0;i<numdakotavariables;i++){ //these are the dakota variables, for all partitions.

        descriptor=variables_descriptors[i];


        /*From descriptor, figure out if the variable is scaled, indexed, distributed or just a simple variable: */
        if (strncmp(descriptor,"scaled_",7)==0){
           /*we are skipping these for now.*/
           npart=variable_partitions_npart[variablecount];
           nt=variable_partitions_nt[variablecount];

           /*increment i to skip the distributed values just collected: */
           i+=npart*nt-1; //careful, the for loop will add 1.
…
           /*we are skipping these for now.*/
        }

        else if (strncmp(descriptor,"distributed_",12)==0){
           if (VerboseQmu())_printf0_(" updating variable " << descriptor << "\n");

           /*recover partition vector: */
           variable_partition=variable_partitions[variablecount];
           npart=variable_partitions_npart[variablecount];

           /*Variable is distributed. Determine root name of variable (ex: distributed_DragCoefficient_1 -> DragCoefficient).
            * Allocate distributed_values and fill the distributed_values with the next npart variables: */

…
           //for (int j=0;j<npart;j++)_printf_(j << ":" << distributed_values[j] << "\n");

           //Call specialty code:
           InputUpdateSpecialtyCode(femmodel,distributed_values,variable_partition,npart,root);

           /*increment i to skip the distributed values just collected: */
           i+=npart-1; //careful, the for loop will add 1.

           /*Free allocations: */
           xDelete<double>(parameter);
…
        variablecount++;
     }

     variablecount=0;
     /*now deal with scaled variables:*/
     for(i=0;i<numdakotavariables;i++){ //these are the dakota variables, for all partitions.

        descriptor=variables_descriptors[i];

        /*From descriptor, figure out if the variable is scaled, indexed, distributed or just a simple variable: */
        if (strncmp(descriptor,"scaled_",7)==0){

           if (VerboseQmu())_printf0_(" updating variable " << descriptor << "\n");

           /*recover partition vector: */
           variable_partition=variable_partitions[variablecount];
…
           nt=variable_partitions_nt[variablecount];

           /*Variable is scaled, determine its root name (ex: scaled_DragCoefficient_1 -> DragCoefficient). Allocate distributed_values and fill the
            * distributed_values with the next npart variables coming from Dakota: */
           memcpy(root,strstr(descriptor,"_")+1,(strlen(strstr(descriptor,"_")+1)+1)*sizeof(char));
…
           /*increment i to skip the distributed values just collected: */
           i+=npart*nt-1; //careful, the for loop will add 1.

           /*Free allocations: */
           xDelete<double>(parameter);
…
     transientinput2 = femmodel->inputs->GetTransientInput(DummyEnum);

-    for(Object* & object : femmodel->elements->objects){
-       Tria* element=xDynamicCast<Tria*>(object);
-
-       if((int)variable_partition[element->Sid()]==-1)id=0; //grab background field
-       else id=distributed_values[(int)variable_partition[element->Sid()]]-1; //grab partition field
-
-       /*recover the right field from the mme: */
-       transientinput = datasetinput->GetTransientInputByOffset(id); _assert_(transientinput);
-
-       /*copy values from the transientinput to the final transientinput2: */
-       for (int j=0;j<N;j++){
-          TriaInput* tria_input=transientinput->GetTriaInput(j);
-          element->InputServe(tria_input);
-          if(interpolationenum==P0Enum){
-             value=tria_input->element_values[0];
-             transientinput2->AddTriaTimeInput( j,1,&(element->lid),&value,P0Enum);
-          }
-          else if(interpolationenum==P1Enum){
-
-             /*Get values and lid list*/
-             const int numvertices = element->GetNumberOfVertices();
-             int *vertexlids = xNew<int>(numvertices);
-             int *vertexsids = xNew<int>(numvertices);
-
-             /*Recover vertices ids needed to initialize inputs*/
-             element->GetVerticesLidList(&vertexlids[0]);
-             element->GetVerticesSidList(&vertexsids[0]);
-             values=tria_input->element_values;
-             transientinput2->AddTriaTimeInput( j,numvertices,vertexlids,values,P1Enum);
+    for (int p=npart;p>=0;p--){
+       int pp=p;
+       if (p==npart)pp=-1; /*so, the logic is, we want to do the -1 partition first, then
+                              go from npart-1 to 0 in reverse order:*/
+
+       for (int i=0;i<femmodel->elements->Size();i++){
+          int element_partition;
+
+          Tria* element=xDynamicCast<Tria*>(femmodel->elements->GetObjectByOffset(i));
+
+          element_partition= (int)variable_partition[element->Sid()];
+          if(element_partition!=pp)continue;
+
+          if(element_partition==-1)id=0; //grab background field
+          else id=distributed_values[element_partition]-1; //grab partition field
+
+          /*recover the right field from the mme: */
+          transientinput = datasetinput->GetTransientInputByOffset(id); _assert_(transientinput);
+
+          /*copy values from the transientinput to the final transientinput2: */
+          for (int j=0;j<N;j++){
+             TriaInput* tria_input=transientinput->GetTriaInput(j);
+             element->InputServe(tria_input);
+             if(interpolationenum==P0Enum){
+                value=tria_input->element_values[0];
+                transientinput2->AddTriaTimeInput( j,1,&(element->lid),&value,P0Enum);
+             }
+             else if(interpolationenum==P1Enum){
+
+                /*Get values and lid list*/
+                const int numvertices = element->GetNumberOfVertices();
+                int *vertexlids = xNew<int>(numvertices);
+                int *vertexsids = xNew<int>(numvertices);
+
+                /*Recover vertices ids needed to initialize inputs*/
+                element->GetVerticesLidList(&vertexlids[0]);
+                element->GetVerticesSidList(&vertexsids[0]);
+                values=tria_input->element_values;
+                transientinput2->AddTriaTimeInput( j,numvertices,vertexlids,values,P1Enum);
+             }
           }
        }
     }
…

     /*Go through elements, copy input name to dummy, and scale it using the distributed_values and the partition vector:*/
-    for(Object* & object : femmodel->elements->objects){
-       Element* element=xDynamicCast<Element*>(object);
+    for(int i=0;i<femmodel->elements->Size();i++){
+       Element* element=xDynamicCast<Element*>(femmodel->elements->GetObjectByOffset(i));
        element->InputScaleFromDakota(distributed_values,partition,npart,nt,name);
     }

     /*We created a dummy input, which was a scaled copy of the name input. Now wipe
      * out the name input with the new input:*/
     femmodel->inputs->ChangeEnum(DummyEnum,name);
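The hunks above classify each Dakota descriptor by its prefix ("scaled_", "distributed_", "indexed_", or none) and recover the root input name from whatever follows the first underscore. As a reading aid only, here is a standalone sketch, not ISSM code, using plain C string handling and made-up descriptor strings, of what the strncmp/strstr calls in the diff do; trimming of the trailing partition index (e.g. "_1"), if any, happens in lines not shown in this changeset view:

// Standalone illustration, not ISSM code: classify descriptors by prefix and
// strip the prefix with strstr, mirroring the hunks above.
#include <cstdio>
#include <cstring>

int main(void){
   const char* descriptors[] = {"rho_ice","scaled_DragCoefficient_1","distributed_SurfaceMassBalance_3"};

   for (const char* descriptor : descriptors){
      if (strncmp(descriptor,"scaled_",7)==0 || strncmp(descriptor,"distributed_",12)==0){
         /*root name starts after the first underscore, as in
          * memcpy(root,strstr(descriptor,"_")+1,...) in the changeset:*/
         const char* root=strstr(descriptor,"_")+1;
         printf("%-34s -> root \"%s\"\n",descriptor,root);
      }
      else{
         printf("%-34s -> plain (non-prefixed) variable\n",descriptor);
      }
   }
   return 0;
}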
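The substantive change of the revision is the element loop in the mme block: the single range-based pass over femmodel->elements is replaced by an outer loop over partitions that visits the background partition (-1) first and then partitions npart-1 down to 0, skipping elements that do not belong to the partition of the current pass. The following minimal sketch reproduces only that visiting order; the element count, partition vector and printout are made up and no ISSM classes are used:

// Minimal sketch with toy data (not ISSM code): background partition -1 is
// processed first, then partitions npart-1 down to 0, as in the new loop above.
#include <cstdio>

int main(void){
   const int npart=3;
   const int numelements=6;
   /*partition id of each element, or -1 for the background field*/
   const int variable_partition[numelements]={-1,2,0,1,-1,2};

   for (int p=npart;p>=0;p--){
      int pp=p;
      if (p==npart)pp=-1; //first pass handles the background partition

      for (int e=0;e<numelements;e++){
         if (variable_partition[e]!=pp)continue; //skip elements outside the current pass
         printf("pass pp=%2d: element %d\n",pp,e);
      }
   }
   return 0;
}

Run as is, it prints the two background elements first, then the elements of partitions 2, 1 and 0.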