/*!\file: SpawnCore.cpp
 * \brief: run a core solution, according to analysis_type and sub_analysis_type
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#else
#error "Cannot compile without HAVE_CONFIG_H symbol! Run configure first!"
#endif

#undef __FUNCT__
#define __FUNCT__ "SpawnCore"

#include "../objects/objects.h"
#include "../io/io.h"
#include "../EnumDefinitions/EnumDefinitions.h"
#include "../shared/shared.h"
#include "./parallel.h"
#include "../include/macros.h"

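/*SpawnCore is called on every cpu: cpu 0 comes in with the dakota variable values and their descriptors,
 * which are broadcast to the other cpus; the requested core solution is then run and the responses are
 * filled in for dakota: */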
void SpawnCore(double* responses, double* variables, char** variables_descriptors,int numvariables, FemModel* femmodels,ParameterInputs* inputs,int analysis_type,int sub_analysis_type){

   int i;

   /*output from core solutions: */
   Vec u_g=NULL;
   Vec p_g=NULL;

   char** responses_descriptors=NULL;
   int numresponses;
   Param* param=NULL;
   char* string=NULL;
   int string_length;
   double* qmu_part=NULL;
   int qmu_npart;

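   /*rank of this cpu in MPI_COMM_WORLD (defined elsewhere): */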
   extern int my_rank;

   /*synchronize all cpus, as CPU 0 is probably late (it is starting the entire dakota strategy!): */
   MPI_Barrier(MPI_COMM_WORLD);

   /*First off, recover the responses descriptors for the response functions: */
   param=(Param*)femmodels[0].parameters->FindParamObject("descriptors");
   numresponses=param->GetM();
   param->GetParameterValue(&responses_descriptors);

   /*Recover partitioning for dakota: */
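   /*(the partition vector and its size are handed to UpdateFromDakota below, together with the dakota variables)*/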
   femmodels[0].parameters->FindParam((void*)&qmu_npart,"qmu_npart");
   femmodels[0].parameters->FindParam((void*)&qmu_part,"qmu_part");

   #ifdef _DEBUG_
   for(i=0;i<numresponses;i++){
      PetscSynchronizedPrintf(MPI_COMM_WORLD,"descriptor %i: %s\n",i,responses_descriptors[i]);
      PetscSynchronizedFlush(MPI_COMM_WORLD);
   }
   #endif

   /*broadcast variables: only cpu 0 has correct values*/
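   /*(the count goes out first, so that the other cpus can size their buffer before receiving the values)*/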
   MPI_Bcast(&numvariables,1,MPI_INT,0,MPI_COMM_WORLD);
   if(my_rank!=0)variables=(double*)xmalloc(numvariables*sizeof(double));
   MPI_Bcast(variables,numvariables,MPI_DOUBLE,0,MPI_COMM_WORLD);

   #ifdef _DEBUG_
   for(i=0;i<numvariables;i++){
      PetscSynchronizedPrintf(MPI_COMM_WORLD,"variable %i: %g\n",i,variables[i]);
      PetscSynchronizedFlush(MPI_COMM_WORLD);
   }
   #endif

   /*broadcast variables_descriptors: */
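   /*(each string is broadcast in two steps: cpu 0 first sends the string length, terminating '\0' included,
    * the other cpus allocate a buffer of that size, then the characters themselves are broadcast)*/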
   if(my_rank!=0){
      variables_descriptors=(char**)xmalloc(numvariables*sizeof(char*));
   }
   for(i=0;i<numvariables;i++){
      if(my_rank==0){
         string=variables_descriptors[i];
         string_length=(strlen(string)+1)*sizeof(char);
      }
      MPI_Bcast(&string_length,1,MPI_INT,0,MPI_COMM_WORLD);
      if(my_rank!=0)string=(char*)xmalloc(string_length);
      MPI_Bcast(string,string_length,MPI_CHAR,0,MPI_COMM_WORLD);
      if(my_rank!=0)variables_descriptors[i]=string;
   }

   #ifdef _DEBUG_
   for(i=0;i<numvariables;i++){
      PetscSynchronizedPrintf(MPI_COMM_WORLD,"variable descriptor %i: %s\n",i,variables_descriptors[i]);
      PetscSynchronizedFlush(MPI_COMM_WORLD);
   }
   #endif

   /*Modify core inputs to reflect the dakota variables inputs: */
   inputs->UpdateFromDakota(variables,variables_descriptors,numvariables,qmu_part,qmu_npart);

   /*Run the analysis core solution sequence, with the updated inputs: */
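   /*(only the diagnostic core is wired in for now; any other analysis_type falls through to the error below)*/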
   if(analysis_type==DiagnosticAnalysisEnum()){

      _printf_("Starting diagnostic core\n");
      diagnostic_core(&u_g,&p_g,femmodels,inputs);

   }
   else{
      throw ErrorException(__FUNCT__,exprintf("%s%i%s%i%s"," analysis_type ",analysis_type," and sub_analysis_type ",sub_analysis_type," not supported yet!"));
   }

   /*compute responses on cpu 0: dummy for now! */
   DakotaResponses(responses,responses_descriptors,numresponses,u_g,p_g,analysis_type,sub_analysis_type);

   /*Free resources:*/

   //vectors
   VecFree(&u_g);
   VecFree(&p_g);

   //variables only on cpu != 0
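   //(these cpus allocated their own copies during the broadcasts above; cpu 0 received its buffers from the caller and does not free them here)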
   if(my_rank!=0){
      xfree((void**)&variables);
      for(i=0;i<numvariables;i++){
         string=variables_descriptors[i];
         xfree((void**)&string);
      }
      xfree((void**)&variables_descriptors);
   }
   //responses descriptors
   for(i=0;i<numresponses;i++){
      string=responses_descriptors[i];
      xfree((void**)&string);
   }
   //rest of dynamic allocations.
   xfree((void**)&responses_descriptors);
   xfree((void**)&qmu_part);
}