1 | /*!\file: SpawnCore.cpp
|
---|
2 | * \brief: run core ISSM solution using Dakota inputs coming from CPU 0.
|
---|
3 | * \sa qmu.cpp DakotaPlugin.cpp
|
---|
4 | *
|
---|
5 | * This routine needs to be understood simultaneously with qmu.cpp and DakotaPlugin.
|
---|
6 | * SpawnCore is called by all CPUS, with CPU 0 holding Dakota variable values, along
|
---|
7 | * with variable descriptors.
|
---|
8 | *
|
---|
9 | * SpawnCore takes care of broadcasting the variables and their descriptors across the MPI
|
---|
10 | * ring. Once this is done, we use the variables to modify the inputs for the solution core.
|
---|
11 | * For ex, if "rho_ice" is provided, for ex 920, we include "rho_ice" in the inputs, then
|
---|
12 | * call the core with the modified inputs. This is the way we get Dakota to explore the parameter
|
---|
13 | * space of the core.
|
---|
14 | *
|
---|
15 | * Once the core is called, we process the results of the core, and using the processed results,
|
---|
16 | * we compute response functions. The responses are computed on all CPUS, but they are targeted
|
---|
17 | * for CPU 0, which will get these values back to the Dakota engine.
|
---|
18 | *
|
---|
19 | */
|
---|
20 |
|
---|
21 | #ifdef HAVE_CONFIG_H
|
---|
22 | #include "config.h"
|
---|
23 | #else
|
---|
24 | #error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
|
---|
25 | #endif
|
---|
26 |
|
---|
27 | #undef __FUNCT__
|
---|
28 | #define __FUNCT__ "SpawnCore"
|
---|
29 |
|
---|
30 | #include "../objects/objects.h"
|
---|
31 | #include "../io/io.h"
|
---|
32 | #include "../EnumDefinitions/EnumDefinitions.h"
|
---|
33 | #include "../shared/shared.h"
|
---|
34 | #include "./parallel.h"
|
---|
35 | #include "../include/macros.h"
|
---|
36 |
|
---|
/*SpawnCore: broadcast Dakota variables from CPU 0 to all MPI ranks, run the
 * requested analysis core with the updated inputs, then compute the Dakota
 * responses (see the file header and qmu.cpp/DakotaPlugin.cpp for context).
 *
 * responses:              output array of response values (filled by DakotaResponses;
 *                         targeted for CPU 0, which hands them back to Dakota).
 * variables:              Dakota variable values. Valid on CPU 0 only on entry;
 *                         broadcast to the other ranks below. On CPU 0 this buffer
 *                         is owned by the caller (Dakota) and is NOT freed here.
 * variables_descriptors:  names matching `variables`. Same ownership rules.
 * numvariables:           length of variables/variables_descriptors; meaningful on
 *                         CPU 0 on entry, broadcast to all ranks below.
 * femmodels:              array of models; femmodels[0] holds the Dakota parameters
 *                         (response descriptors, qmu partitioning, debug flag).
 * inputs:                 core inputs; updated in place from the Dakota variables.
 * analysis_type:          selects which solution core to run (diagnostic, thermal,
 *                         prognostic or transient).
 * sub_analysis_type:      only used in the unsupported-analysis error message here.
 * counter:                Dakota evaluation number (for logging).
 *
 * NOTE(review): all ranks must reach the MPI_Barrier and execute the MPI_Bcast
 * calls below in the same order — do not reorder the broadcast sequence.
 */
void SpawnCore(double* responses, double* variables, char** variables_descriptors,int numvariables, FemModel* femmodels,ParameterInputs* inputs,int analysis_type,int sub_analysis_type,int counter){

	int i;

	/*output from core solutions: */
	DataSet* results=NULL;

	/*response descriptors recovered from femmodels[0] (allocated by
	 * GetParameterValue; freed at the bottom of this routine): */
	char** responses_descriptors=NULL;
	int numresponses;
	Param* param=NULL;
	char* string=NULL;    //scratch pointer used for per-string broadcast and free
	int string_length;    //byte length of the string being broadcast, '\0' included
	double* qmu_part=NULL; //dakota partitioning vector (freed here)
	int qmu_npart;
	int debug=0;          //defaults to quiet if the "debug" parameter is absent

	extern int my_rank;   //MPI rank of this cpu

	/*synchronize all cpus, as CPU 0 is probably late (it is starting the entire dakota strategy!) : */
	MPI_Barrier(MPI_COMM_WORLD);

	/*some parameters needed: */
	femmodels[0].parameters->FindParam((void*)&debug,"debug");

	/*First off, recover the response descriptors for the response functions: */
	param=(Param*)femmodels[0].parameters->FindParamObject("responsedescriptors");
	if(!param)throw ErrorException(__FUNCT__," could not find response descriptors!");

	numresponses=param->GetM();
	param->GetParameterValue(&responses_descriptors);

	/*Recover partitioning for dakota: */
	femmodels[0].parameters->FindParam((void*)&qmu_npart,"qmu_npart");
	femmodels[0].parameters->FindParam((void*)&qmu_part,"qmu_part");
	#ifdef _DEBUG_
	for(i=0;i<numresponses;i++){
		PetscSynchronizedPrintf(MPI_COMM_WORLD,"response descriptor %i: %s\n",i,responses_descriptors[i]);
		PetscSynchronizedFlush(MPI_COMM_WORLD);
	}
	#endif

	/*broadcast variables: only cpu 0 has correct values*/
	MPI_Bcast(&numvariables,1,MPI_INT,0,MPI_COMM_WORLD);
	//ranks != 0 allocate their own receive buffer; CPU 0 reuses Dakota's
	if(my_rank!=0)variables=(double*)xmalloc(numvariables*sizeof(double));
	MPI_Bcast(variables,numvariables,MPI_DOUBLE,0,MPI_COMM_WORLD);

	#ifdef _DEBUG_
	for(i=0;i<numvariables;i++){
		PetscSynchronizedPrintf(MPI_COMM_WORLD,"variable %i: %g\n",i,variables[i]);
		PetscSynchronizedFlush(MPI_COMM_WORLD);
	}
	#endif

	/*broadcast variables_descriptors: strings are broadcast one at a time,
	 * length first (including the '\0' terminator), then the characters. */
	if(my_rank!=0){
		variables_descriptors=(char**)xmalloc(numvariables*sizeof(char*));
	}
	for(i=0;i<numvariables;i++){
		if(my_rank==0){
			string=variables_descriptors[i];
			string_length=(strlen(string)+1)*sizeof(char);
		}
		MPI_Bcast(&string_length,1,MPI_INT,0,MPI_COMM_WORLD);
		if(my_rank!=0)string=(char*)xmalloc(string_length);
		MPI_Bcast(string,string_length,MPI_CHAR,0,MPI_COMM_WORLD);
		if(my_rank!=0)variables_descriptors[i]=string;
	}

	#ifdef _DEBUG_
	for(i=0;i<numvariables;i++){
		PetscSynchronizedPrintf(MPI_COMM_WORLD,"variable descriptor %i: %s\n",i,variables_descriptors[i]);
		PetscSynchronizedFlush(MPI_COMM_WORLD);
	}
	#endif

	if(debug)_printf_("Iteration: %i\n",counter);

	//initialize results:
	results=new DataSet(ResultsEnum());

	/*Modify core inputs to reflect the dakota variables inputs: */
	inputs->UpdateFromDakota(variables,variables_descriptors,numvariables,femmodels[0].parameters,qmu_part,qmu_npart); //femmodel #0 is the one holding the parameters for Dakota.

	/*Run the analysis core solution sequence, with the updated inputs: */
	if(analysis_type==DiagnosticAnalysisEnum()){

		if(debug)_printf_("Starting diagnostic core\n");

		diagnostic_core(results,femmodels,inputs);

	}
	else if(analysis_type==ThermalAnalysisEnum()){

		if(debug)_printf_("Starting thermal core\n");
		thermal_core(results,femmodels,inputs);

	}
	else if(analysis_type==PrognosticAnalysisEnum()){

		if(debug)_printf_("Starting prognostic core\n");
		prognostic_core(results,femmodels,inputs);

	}
	else if(analysis_type==TransientAnalysisEnum()){

		if(debug)_printf_("Starting transient core\n");
		transient_core(results,femmodels,inputs);

	}
	else throw ErrorException(__FUNCT__,exprintf("%s%i%s%i%s"," analysis_type ",analysis_type," and sub_analysis_type ",sub_analysis_type," not supported yet!"));

	/*Now process the outputs, before computing the dakota responses: */
	if(debug)_printf_("process results:\n");
	ProcessResults(&results,femmodels,analysis_type); //may replace `results` with a new, processed DataSet

	/*compute responses on cpu 0: dummy for now! */
	if(debug)_printf_("compute dakota responses:\n");
	DakotaResponses(responses,responses_descriptors,numresponses,femmodels,results,analysis_type,sub_analysis_type);

	/*Free ressources:*/
	delete results;

	//variables only on cpu != 0: CPU 0's buffers belong to Dakota and must not be freed here
	if(my_rank!=0){
		xfree((void**)&variables);
		for(i=0;i<numvariables;i++){
			string=variables_descriptors[i];
			xfree((void**)&string);
		}
		xfree((void**)&variables_descriptors);
	}
	//responses descriptors: allocated by GetParameterValue on every rank, freed on every rank
	for(i=0;i<numresponses;i++){
		string=responses_descriptors[i];
		xfree((void**)&string);
	}
	//rest of dynamic allocations.
	xfree((void**)&responses_descriptors);
	xfree((void**)&qmu_part);
}
|
---|
183 |
|
---|