Changeset 25761


Timestamp: 11/16/20 14:06:39 (4 years ago)
Author: jdquinn
Message: CHG: Missing changes from merge from Eric’s branch
File: 1 edited

  • issm/trunk-jpl/src/c/modules/InputUpdateFromDakotax/InputUpdateFromDakotax.cpp

Most of the hunks below are whitespace-only (trailing whitespace picked up in the merge). The substantive changes replace the range-based loops over femmodel->elements->objects with index-based loops using GetObjectByOffset(), and restructure the mme copy loop so that elements are processed partition by partition: the background partition (-1) first, then partitions npart-1 down to 0.

--- issm/trunk-jpl/src/c/modules/InputUpdateFromDakotax/InputUpdateFromDakotax.cpp (r25539)
+++ issm/trunk-jpl/src/c/modules/InputUpdateFromDakotax/InputUpdateFromDakotax.cpp (r25761)
@@ -12,5 +12,5 @@
 #include "../InputUpdateFromConstantx/InputUpdateFromConstantx.h"
 #include "../InputUpdateFromVectorDakotax/InputUpdateFromVectorDakotax.h"
-
+
 void  InputUpdateFromDakotax(FemModel* femmodel,double* variables,char* *variables_descriptors,int numdakotavariables){ /*{{{*/
 
@@ -34,27 +34,27 @@
 
    /*retrieve parameters: */
-   femmodel->parameters->FindParam(&variable_partitions,&variable_partitions_num,NULL,NULL,QmuVariablePartitionsEnum);
-   femmodel->parameters->FindParam(&variable_partitions_npart,NULL,NULL,QmuVariablePartitionsNpartEnum);
-   femmodel->parameters->FindParam(&variable_partitions_nt,NULL,NULL,QmuVariablePartitionsNtEnum);
-
-
-   /*Go through all dakota descriptors, ex: "rho_ice","thermal_conductivity","thickness1","thickness2", etc ..., and
-    * for each descriptor, take the variable value and plug it into the inputs (more or less :)):
-    * We also start with distributed and standard values , as they tend to be used to pluck data from a multi-modle ensemble (mme)
-    * which can then be scaled. Doing the scaling first would be impractical, as the entire mme would have to be scaled,
+   femmodel->parameters->FindParam(&variable_partitions,&variable_partitions_num,NULL,NULL,QmuVariablePartitionsEnum); 
+   femmodel->parameters->FindParam(&variable_partitions_npart,NULL,NULL,QmuVariablePartitionsNpartEnum); 
+   femmodel->parameters->FindParam(&variable_partitions_nt,NULL,NULL,QmuVariablePartitionsNtEnum); 
+
+
+   /*Go through all dakota descriptors, ex: "rho_ice","thermal_conductivity","thickness1","thickness2", etc ..., and 
+    * for each descriptor, take the variable value and plug it into the inputs (more or less :)): 
+    * We also start with distributed and standard values , as they tend to be used to pluck data from a multi-modle ensemble (mme) 
+    * which can then be scaled. Doing the scaling first would be impractical, as the entire mme would have to be scaled, 
     * which is a waste of time:*/
 
    variablecount=0;
-   for(i=0;i<numdakotavariables;i++){ //these are the dakota variables, for all partitions.
+   for(i=0;i<numdakotavariables;i++){ //these are the dakota variables, for all partitions. 
 
      descriptor=variables_descriptors[i];
-
+
 
      /*From descriptor, figure out if the variable is scaled, indexed, distributed or just a simple variable: */
-     if (strncmp(descriptor,"scaled_",7)==0){
+     if (strncmp(descriptor,"scaled_",7)==0){ 
         /*we are skipping these for now.*/
        npart=variable_partitions_npart[variablecount];
        nt=variable_partitions_nt[variablecount];
-
+
        /*increment i to skip the distributed values just collected: */
        i+=npart*nt-1; //careful, the for loop will add 1.
@@ -66,13 +66,13 @@
        /*we are skipping these for now.*/
      }
-
+
      else if (strncmp(descriptor,"distributed_",12)==0){
        if (VerboseQmu())_printf0_("   updating variable " << descriptor << "\n");
-
+
        /*recover partition vector: */
        variable_partition=variable_partitions[variablecount];
        npart=variable_partitions_npart[variablecount];
 
-       /*Variable is distributed. Determine root name of variable (ex: distributed_DragCoefficient_1 -> DragCoefficient).
+       /*Variable is distributed. Determine root name of variable (ex: distributed_DragCoefficient_1 -> DragCoefficient). 
         * Allocate distributed_values and fill the distributed_values with the next npart variables: */
 
@@ -86,11 +86,11 @@
 
        //for (int j=0;j<npart;j++)_printf_(j << ":" << distributed_values[j] << "\n");
-
+
        //Call specialty code:
        InputUpdateSpecialtyCode(femmodel,distributed_values,variable_partition,npart,root);
-
+
        /*increment i to skip the distributed values just collected: */
        i+=npart-1; //careful, the for loop will add 1.
-
+
        /*Free allocations: */
        xDelete<double>(parameter);
@@ -104,16 +104,16 @@
      variablecount++;
    }
-
+
    variablecount=0;
    /*now deal with scaled variabes:*/
-   for(i=0;i<numdakotavariables;i++){ //these are the dakota variables, for all partitions.
+   for(i=0;i<numdakotavariables;i++){ //these are the dakota variables, for all partitions. 
 
      descriptor=variables_descriptors[i];
-
+
      /*From descriptor, figure out if the variable is scaled, indexed, distributed or just a simple variable: */
      if (strncmp(descriptor,"scaled_",7)==0){
-
+
        if (VerboseQmu())_printf0_("   updating variable " << descriptor << "\n");
-
+
        /*recover partition vector: */
        variable_partition=variable_partitions[variablecount];
@@ -121,5 +121,5 @@
        nt=variable_partitions_nt[variablecount];
 
-       /* Variable is scaled, determine its root name (ex: scaled_DragCoefficient_1 -> DragCoefficient). Allocate distributed_values and fill the
+       /* Variable is scaled, determine its root name (ex: scaled_DragCoefficient_1 -> DragCoefficient). Allocate distributed_values and fill the 
         * distributed_values with the next npart variables coming from Dakota: */
        memcpy(root,strstr(descriptor,"_")+1,(strlen(strstr(descriptor,"_")+1)+1)*sizeof(char));
@@ -136,5 +136,5 @@
        /*increment i to skip the distributed values just collected: */
        i+=npart*nt-1; //careful, the for loop will add 1.
-
+
        /*Free allocations: */
        xDelete<double>(parameter);
@@ -202,33 +202,44 @@
    transientinput2 = femmodel->inputs->GetTransientInput(DummyEnum);
 
-   for(Object* & object : femmodel->elements->objects){
-     Tria*   element=xDynamicCast<Tria*>(object);
-
-     if((int)variable_partition[element->Sid()]==-1)id=0; //grab background field
-     else id=distributed_values[(int)variable_partition[element->Sid()]]-1; //grab partition field
-
-     /*recover the right field from the mme: */
-     transientinput = datasetinput->GetTransientInputByOffset(id); _assert_(transientinput);
-
-     /*copy values from the transientinput to the final transientinput2: */
-     for (int j=0;j<N;j++){
-       TriaInput* tria_input=transientinput->GetTriaInput(j);
-       element->InputServe(tria_input);
-       if(interpolationenum==P0Enum){
-          value=tria_input->element_values[0];
-          transientinput2->AddTriaTimeInput( j,1,&(element->lid),&value,P0Enum);
-       }
-       else if(interpolationenum==P1Enum){
-
-          /*Get values and lid list*/
-          const int   numvertices     = element->GetNumberOfVertices();
-          int        *vertexlids      = xNew<int>(numvertices);
-          int        *vertexsids      = xNew<int>(numvertices);
-
-          /*Recover vertices ids needed to initialize inputs*/
-          element->GetVerticesLidList(&vertexlids[0]);
-          element->GetVerticesSidList(&vertexsids[0]);
-          values=tria_input->element_values;
-          transientinput2->AddTriaTimeInput( j,numvertices,vertexlids,values,P1Enum);
+   for (int p=npart;p>=0;p--){
+     int pp=p;
+     if (p==npart)pp=-1; /*so, the logic is, we want to do the -1 partition first, then
+                            go from npart-1 to 0 in reverse order:*/
+
+     for (int i=0;i<femmodel->elements->Size();i++){
+       int element_partition;
+
+       Tria*   element=xDynamicCast<Tria*>(femmodel->elements->GetObjectByOffset(i));
+
+       element_partition= (int)variable_partition[element->Sid()];
+       if(element_partition!=pp)continue;
+
+       if(element_partition==-1)id=0; //grab background field
+       else id=distributed_values[element_partition]-1; //grab partition field
+
+       /*recover the right field from the mme: */
+       transientinput = datasetinput->GetTransientInputByOffset(id); _assert_(transientinput);
+
+       /*copy values from the transientinput to the final transientinput2: */
+       for (int j=0;j<N;j++){
+          TriaInput* tria_input=transientinput->GetTriaInput(j);
+          element->InputServe(tria_input);
+          if(interpolationenum==P0Enum){
+             value=tria_input->element_values[0];
+             transientinput2->AddTriaTimeInput( j,1,&(element->lid),&value,P0Enum);
+          }
+          else if(interpolationenum==P1Enum){
+
+             /*Get values and lid list*/
+             const int   numvertices     = element->GetNumberOfVertices();
+             int        *vertexlids      = xNew<int>(numvertices);
+             int        *vertexsids      = xNew<int>(numvertices);
+
+             /*Recover vertices ids needed to initialize inputs*/
+             element->GetVerticesLidList(&vertexlids[0]);
+             element->GetVerticesSidList(&vertexsids[0]);
+             values=tria_input->element_values;
+             transientinput2->AddTriaTimeInput( j,numvertices,vertexlids,values,P1Enum);
+          }
       }
    }
@@ -247,10 +247,10 @@
 
    /*Go through elements, copy input name to dummy, and scale it using the distributed_values and the partition vector:*/
-   for(Object* & object : femmodel->elements->objects){
-     Element* element=xDynamicCast<Element*>(object);
+   for(int i=0;i<femmodel->elements->Size();i++){
+     Element* element=xDynamicCast<Element*>(femmodel->elements->GetObjectByOffset(i));
      element->InputScaleFromDakota(distributed_values,partition,npart,nt,name);
    }
 
-   /*We created a dummy input, which was a scaled copy of the name input. Now wipe
+   /*We created a dummy input, which was a scaled copy of the name input. Now wipe 
     * out the name input with the new input:*/
    femmodel->inputs->ChangeEnum(DummyEnum,name);
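
The one non-obvious piece of the new copy loop is its iteration order: the outer counter p runs from npart down to 0, but p==npart is remapped to pp=-1, so the background partition is written into transientinput2 first and the numbered partitions then overwrite it from npart-1 down to 0. A minimal standalone sketch of just that remapping, with a made-up partition count (not ISSM code, only an illustration of the ordering):

   #include <cstdio>

   int main(){
      int npart=3; /*hypothetical partition count*/

      /*Same remapping as in r25761: p==npart stands in for the background
       * partition (-1), which is visited first; the real partitions then
       * follow from npart-1 down to 0.*/
      for(int p=npart;p>=0;p--){
         int pp=p;
         if(p==npart) pp=-1;
         printf("processing partition %d\n",pp);
      }
      return 0;
   }

Compiled and run as-is, this prints the partitions in the order -1, 2, 1, 0, which is the order in which the changeset applies the background field and then the per-partition mme fields.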