Changeset 962
- Timestamp: 06/12/09 15:31:53
- Location: issm/trunk/src
- Files: 10 added, 2 deleted, 21 edited
issm/trunk/src/c/DataSet/DataSet.cpp
r848 → r962: when an object is removed from a DataSet, the object pointed to by the iterator is now deleted before the iterator is erased, so its memory is actually released; in the clean-up loop, every object is deleted and the container is then emptied with a single objects.clear() instead of erasing elements one at a time inside the loop.
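Both hunks follow the usual rule for containers of owning raw pointers: free the object first, then drop the pointer. A minimal standalone sketch of the idiom (not the ISSM DataSet class itself):

    #include <vector>
    #include <algorithm>

    struct Object { /* payload */ };

    // Remove one object: delete it, then erase the now-dangling pointer.
    void remove_object(std::vector<Object*>& objects, Object* object){
        std::vector<Object*>::iterator it = std::find(objects.begin(), objects.end(), object);
        if(it != objects.end()){
            delete *it;          // release the object itself
            objects.erase(it);   // then drop the stale pointer from the container
        }
    }

    // Destroy everything: delete each object, then clear the vector in one call.
    void delete_all(std::vector<Object*>& objects){
        for(std::vector<Object*>::iterator it = objects.begin(); it != objects.end(); ++it){
            delete *it;
        }
        objects.clear();
    }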
issm/trunk/src/c/Makefile.am
r851 → r962: objects/DakotaPlugin.h and objects/DakotaPlugin.cpp are added to the sources, and the new Qmux module is wired into both source lists (Qmux/Qmux.h, Qmux/Qmux.cpp and Qmux/SpawnCore.cpp in each, with Qmux/SpawnCoreSerial.cpp in the first list and Qmux/DakotaResponses.cpp plus Qmux/SpawnCoreParallel.cpp in the second); the old parallel/SpawnCore.cpp, parallel/qmu.cpp and parallel/DakotaResponses.cpp entries are dropped.
issm/trunk/src/c/ModelProcessorx/CreateDataSets.cpp
r765 → r962: CreateParametersQmu(pparameters,model,model_handle) is now called once, right after CreateParameters(), for every solution type; the three per-solution calls guarded by if(model->qmu_analysis) in the diagnostic-horizontal, thermal and prognostic branches are removed, since the guard moves inside CreateParametersQmu itself (see the sketch below and the CreateParametersQmu.cpp entry).
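The design choice is to keep the high-level driver free of qmu-specific branching and let the callee decide whether there is anything to do. A schematic sketch of that pattern, with simplified signatures rather than the actual ISSM ones:

    struct Model { int qmu_analysis; /* ... */ };

    void CreateParameters(Model*){ /* parameters common to all solutions */ }

    void CreateParametersQmu(Model* model){
        if(!model->qmu_analysis) return;   // the guard now lives in the callee
        /* ...build qmu-only parameters... */
    }

    void CreateDataSets(Model* model){
        CreateParameters(model);
        CreateParametersQmu(model);        // called unconditionally by the driver
        /* ...per-solution dataset creation... */
    }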
issm/trunk/src/c/ModelProcessorx/Model.cpp
r816 → r962: the Model structure gains a name string: model->name is initialized to NULL in the constructor, fetched from the model's "name" field as a String in ModelInit, and released with xfree() in the clean-up code.
issm/trunk/src/c/ModelProcessorx/Model.h
r816 → r962: a char* name field is added to struct Model.
issm/trunk/src/c/ModelProcessorx/Qmu/CreateParametersQmu.cpp
r765 → r962: the body of CreateParametersQmu is wrapped in if(model->qmu_analysis){...}, so the function is now safe to call for every solution, and ../../include/macros.h is included. Inside the guard, the Dakota file names are built from model->name ("<name>.qmu.in", "<name>.qmu.out", "<name>.qmu.err") and stored as the STRING parameters qmuinname, qmuoutname and qmuerrname, before qmu_npart is added as before. Variable and response descriptors are fetched with build-dependent logic: under _SERIAL_ they are read from the variabledescriptors and responsedescriptors cell arrays of the model structure with mxGetField/mxGetCell/FetchData, while the parallel build still reads the marshalled fields variabledescriptor0, variabledescriptor1, ... and responsedescriptor0, ... with ModelFetchData; in both cases they end up in the STRINGARRAY parameters variabledescriptors and responsedescriptors, with an optional _DEBUG_ dump of the descriptors. The mesh partitioning into qmu_npart parts (MeshPartitionx on the tria or penta connectivity, producing the qmu_part DOUBLEVEC parameter) is no longer restricted to _PARALLEL_, and distributed Dakota variables (thickness, drag) are still fetched and added as DOUBLEVEC parameters. All allocations, including the three qmu file names, are freed at the end, and the old unguarded code that followed is deleted.
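The file-name construction follows the usual allocate-strlen-plus-suffix pattern. A minimal standalone sketch of it, using plain malloc/sprintf instead of the ISSM xmalloc wrapper and an illustrative model name:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Build "<name><suffix>", e.g. "mymodel.qmu.in"; the caller frees the result.
    char* qmu_filename(const char* name, const char* suffix){
        char* out = (char*)malloc(strlen(name) + strlen(suffix) + 1);
        sprintf(out, "%s%s", name, suffix);
        return out;
    }

    int main(){
        char* qmuinname  = qmu_filename("mymodel", ".qmu.in");
        char* qmuoutname = qmu_filename("mymodel", ".qmu.out");
        char* qmuerrname = qmu_filename("mymodel", ".qmu.err");
        printf("%s %s %s\n", qmuinname, qmuoutname, qmuerrname);
        free(qmuinname); free(qmuoutname); free(qmuerrname);
        return 0;
    }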
issm/trunk/src/c/io/WriteParams.cpp
r1 → r962: WriteParams learns to write STRINGARRAY parameters back to MATLAB: the string array and its length M are recovered from the parameter, an Mx1 cell array is created with mxCreateCellArray, each cell is filled with mxCreateString, and the cell array is attached to the output structure under the parameter name with mxSetField (new locals k, stringarray and dims support this).
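A reduced sketch of that cell-array conversion, using only documented MEX API calls; the Param accessors are replaced here by a plain char** and a count, and the target field is assumed to exist already in the output struct:

    #include "mex.h"

    // Turn a C array of strings into an Mx1 MATLAB cell array of char arrays.
    mxArray* strings_to_cell(char** strings, mwSize M){
        mwSize dims[2];
        dims[0] = M;
        dims[1] = 1;
        mxArray* cell = mxCreateCellArray(2, dims);
        for(mwSize k = 0; k < M; k++){
            mxSetCell(cell, k, mxCreateString(strings[k]));
        }
        return cell;
    }

    // Attach it to a scalar struct under a given field name, as WriteParams does
    // (mxSetField requires the field to have been declared when the struct was created).
    void set_string_field(mxArray* dataref, const char* fieldname, char** strings, mwSize M){
        mxSetField(dataref, 0, fieldname, strings_to_cell(strings, M));
    }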
issm/trunk/src/c/issm.h
r848 → r962: issm.h now includes ./Qmux/Qmux.h.
issm/trunk/src/c/objects/DakotaPlugin.cpp
r805 → r962: DakotaPlugin.cpp now includes ../Qmux/Qmux.h instead of ../parallel/parallel.h; the constructor takes its femmodels and inputs arguments as void* rather than FemModel* and ParameterInputs*; the SpawnCore call gains the number of response functions (numFns) as its second argument; and the MPI_Bcast that tells the other CPUs the iteration is finished is now compiled only under _PARALLEL_.
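That broadcast is the usual master-to-workers handshake in which rank 0 announces whether another qmu iteration is coming. A generic, standalone MPI sketch of the pattern, not the ISSM SpawnCore code; the iteration count and the "run one core solution" body are placeholders:

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv){
        MPI_Init(&argc, &argv);
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        int status = 1;      // 1: another iteration follows, 0: done
        int iteration = 0;

        for(;;){
            if(rank == 0) status = (iteration < 3) ? 1 : 0;    // the master decides
            MPI_Bcast(&status, 1, MPI_INT, 0, MPI_COMM_WORLD); // every rank learns the decision
            if(!status) break;
            /* ...all ranks run one core solution here... */
            if(rank == 0) printf("iteration %i done\n", iteration);
            iteration++;
        }

        MPI_Finalize();
        return 0;
    }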
issm/trunk/src/c/objects/DakotaPlugin.h
r805 → r962: the DakotaPlugin header is decoupled from the rest of ISSM: the constructor now takes void* femmodels and void* inputs, and the corresponding member fields become void* (a comment typo, "are use by", is fixed along the way).
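Storing the models as void* keeps the FemModel and ParameterInputs headers out of the Dakota-facing header; the implementation file casts them back when it needs them. A schematic sketch of this decoupling, with placeholder types rather than the real ISSM classes:

    // plugin.h -- no ISSM headers needed here
    class Plugin {
    public:
        Plugin(void* in_femmodels, void* in_inputs)
            : femmodels(in_femmodels), inputs(in_inputs) {}
        void run();
    private:
        void* femmodels;   // really a FemModel*, cast back in the .cpp
        void* inputs;      // really a ParameterInputs*
    };

    // plugin.cpp -- the concrete types are only visible on this side
    struct FemModel        { /* ... */ };
    struct ParameterInputs { /* ... */ };

    void Plugin::run(){
        FemModel*        fems = static_cast<FemModel*>(femmodels);
        ParameterInputs* in   = static_cast<ParameterInputs*>(inputs);
        (void)fems; (void)in;  // hand them to the core solution
    }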
issm/trunk/src/c/objects/Param.cpp
r803 → r962: Param::Echo() and Param::DeepEcho() are reorganized: Echo() now prints only the parameter id, name and scalar or string values (the dumps of vector, matrix and PETSc contents are commented out), while DeepEcho() keeps the full verbose output, and the break that was missing after the STRINGARRAY case is added. Marshalling of STRINGARRAY parameters is fixed: Marshall() copies the characters of each string (through a tempstring pointer) instead of the bytes of the pointer itself, MarshallSize() accounts for the per-string size header consistently, and Demarshall() allocates a fresh buffer for every string before copying into it and storing it in the array. A stray debug print ("cheinnere") in the default case of Marshall() is removed.
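The bug the marshalling fix addresses is the classic &array[i]-versus-array[i] confusion: the old code serialized the pointer slots, the new code serializes the characters. A minimal standalone sketch of length-prefixed marshalling and demarshalling of a string array, with plain malloc instead of the ISSM xmalloc wrapper:

    #include <cstdlib>
    #include <cstring>
    #include <cstdio>

    // Write M strings into buf as [size0][chars0][size1][chars1]...; returns bytes written.
    size_t marshall_strings(char* buf, char** strings, int M){
        char* p = buf;
        for(int i = 0; i < M; i++){
            int size = (int)strlen(strings[i]) + 1;            // include the terminating '\0'
            memcpy(p, &size, sizeof(size));        p += sizeof(size);
            memcpy(p, strings[i], (size_t)size);   p += size;  // copy the characters, not the pointer
        }
        return (size_t)(p - buf);
    }

    // Read them back, allocating a fresh buffer per string.
    void demarshall_strings(const char* buf, char** strings, int M){
        const char* p = buf;
        for(int i = 0; i < M; i++){
            int size;
            memcpy(&size, p, sizeof(size));        p += sizeof(size);
            strings[i] = (char*)malloc((size_t)size);
            memcpy(strings[i], p, (size_t)size);   p += size;
        }
    }

    int main(){
        char a[] = "thickness";
        char b[] = "drag";
        char* in[2]  = {a, b};
        char* out[2];
        char buf[64];

        marshall_strings(buf, in, 2);
        demarshall_strings(buf, out, 2);
        printf("%s %s\n", out[0], out[1]);                     // prints: thickness drag

        free(out[0]);
        free(out[1]);
        return 0;
    }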
issm/trunk/src/c/objects/ParameterInputs.cpp
r803 → r962: a _DEBUG_-only block is added that prints the parameter vector node by node with PetscSynchronizedPrintf/PetscSynchronizedFlush before the parameter is added to the inputs; the other hunk is a whitespace-only change.
issm/trunk/src/c/parallel/OutputControl.cpp
r872 → r962: OutputControl.cpp now includes ../shared/shared.h, and the comment above the parameter recovery is updated from "recover numberofnodes" to "recover parameters".
issm/trunk/src/c/parallel/control.cpp
r872 → r962: the block that saved temporary results every five control iterations is commented out.
issm/trunk/src/c/parallel/diagnostic.cpp
r804 → r962: diagnostic.cpp no longer reads the qmu input, output and error file names from argv[5..7] (those names are now built from the model name in CreateParametersQmu), and the qmu branch calls Qmux(&femmodels[0],inputs,DiagnosticAnalysisEnum(),NoneAnalysisEnum()) instead of the old qmu(qmuinname,qmuoutname,qmuerrname,...).
issm/trunk/src/c/parallel/parallel.h
r823 → r962: the prototypes for qmu(), SpawnCore() and DakotaResponses() are removed from parallel.h; they now belong to the Qmux module.
issm/trunk/src/c/parallel/prognostic.cpp
r767 → r962: same clean-up as diagnostic.cpp: the qmu file-name arguments disappear from the argv parsing, and the qmu branch now calls Qmux(&fem,inputs,PrognosticAnalysisEnum(),NoneAnalysisEnum()).
issm/trunk/src/c/parallel/thermal.cpp
r765 → r962: same clean-up: the qmu file-name arguments are dropped and the qmu branch calls Qmux(&femmodels[0],inputs,ThermalAnalysisEnum(),NoneAnalysisEnum()).
issm/trunk/src/c/parallel/transient.cpp
r902 → r962: same clean-up: the qmu file-name arguments are dropped and the qmu branch calls Qmux(&femmodels[0],inputs,TransientAnalysisEnum(),NoneAnalysisEnum()).
issm/trunk/src/mex/Makefile.am
r847 → r962: a Qmu module is added to the mex build (registered in the module list, with Qmu_SOURCES = Qmu/Qmu.cpp and Qmu/Qmu.h), and the mex link line now also pulls in $(DAKOTALIB) and $(FLIBS).
issm/trunk/src/mex/ModelProcessor/ModelProcessor.cpp
r465 → r962: whitespace only; blank lines around the CreateDataSets call and the WriteData block are moved.