/*!\file: transient.cpp
 * \brief: transient solution — parallel driver that reads a model from disk,
 *         runs the transient computational core (or a Dakota/qmu wrapper
 *         around it), and writes results back to disk.
 */

#include "../issm.h"
#include "./parallel.h"

#undef __FUNCT__
#define __FUNCT__ "transient"

#ifdef HAVE_CONFIG_H
#include "config.h"
#else
#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
#endif

/* Entry point.
 * Expected argv layout (NOTE(review): inferred from the indices used below —
 * argv[1] is read by none of this code, presumably consumed elsewhere; confirm
 * against the launcher):
 *   argv[2] input file name, argv[3] output file name, argv[4] lock file name.
 * Returns 0 on success. */
int main(int argc,char* *argv){

	/*I/O: file handle and the three file names recovered from argv. */
	FILE* fid=NULL;
	char* inputfilename=NULL;
	char* outputfilename=NULL;
	char* lockname=NULL;
	int numberofnodes;
	int qmu_analysis=0;   /* non-zero -> run the qmu (Dakota) wrapper instead of the plain core */
	double waitonlock=0;  /* >0 -> write a lock file on completion so callers can poll for it */

	/*Model: */
	Model* model=NULL;
	int dim=-1;           /* problem dimension; 3 triggers thermal+melting formulations */

	/*Results: */
	DataSet* results=NULL;
	DataSet* processed_results=NULL;
	Result* result=NULL;
	ParameterInputs* inputs=NULL;

	/*inputs: raw nodal fields recovered from the model (ownership transferred
	 * here; freed with xfree at the end). */
	double* u_g=NULL;     /* velocity, 3 dofs per node */
	double* m_g=NULL;     /* melting, 1 dof per node */
	double* a_g=NULL;     /* accumulation, 1 dof per node */
	double dt;            /* time step (years; converted to seconds via yts below) */
	double yts;           /* year-to-seconds conversion factor */
	Param* param=NULL;    /* NOTE(review): declared but never used in this file */

	/*time: wall-clock markers; each pair brackets a phase, with MPI barriers
	 * so all ranks measure the same interval. */
	double start, finish;
	double start_core, finish_core;
	double start_init, finish_init;

	MODULEBOOT();

	#if !defined(_PARALLEL_) || (defined(_PARALLEL_) && !defined(_HAVE_PETSC_))
	throw ErrorException(__FUNCT__," parallel executable was compiled without support of parallel libraries!");
	#endif

	/*Initialize Petsc and get start time*/
	PetscInitialize(&argc,&argv,(char *)0,"");
	MPI_Barrier(MPI_COMM_WORLD); start=MPI_Wtime();

	/*Size and rank: my_rank/num_procs are globals declared elsewhere
	 * (presumably in parallel.h — confirm). */
	MPI_Comm_rank(MPI_COMM_WORLD,&my_rank);
	MPI_Comm_size(MPI_COMM_WORLD,&num_procs);

	_printf_("recover , input file name and output file name:\n");
	inputfilename=argv[2];
	outputfilename=argv[3];
	lockname=argv[4];
	/* NOTE(review): argc is never checked against these indices; a short
	 * command line reads past argv. */

	/*Open handle to data on disk: binary mode; every AddFormulation below
	 * reads sequentially from this one handle, so their order matters.
	 * NOTE(review): fid is never closed in this file — verify pfopen/pfclose
	 * contract. */
	fid=pfopen(inputfilename,"rb");

	/*Initialize model structure: */
	MPI_Barrier(MPI_COMM_WORLD); start_init=MPI_Wtime();
	model=new Model();

	_printf_("read and create finite element model:\n");
	_printf_("\n reading diagnostic horiz model data:\n");
	model->AddFormulation(fid,DiagnosticAnalysisEnum(),HorizAnalysisEnum());

	_printf_("\n reading diagnostic vert model data:\n");
	model->AddFormulation(fid,DiagnosticAnalysisEnum(),VertAnalysisEnum());

	_printf_("\n reading diagnostic stokes model data:\n");
	model->AddFormulation(fid,DiagnosticAnalysisEnum(),StokesAnalysisEnum());

	_printf_("\n reading diagnostic hutter model data:\n");
	model->AddFormulation(fid,DiagnosticAnalysisEnum(),HutterAnalysisEnum());

	_printf_("\n reading surface and bed slope computation model data:\n");
	model->AddFormulation(fid,SlopecomputeAnalysisEnum());

	_printf_("\n reading prognositc model data:\n");
	model->AddFormulation(fid,PrognosticAnalysisEnum());

	/*Do we run in 3d?, in which case we need thermal and melting also:*/
	model->FindParam(&dim,"dim");
	if(dim==3){
		_printf_("read and create thermal finite element model:\n");
		model->AddFormulation(fid,ThermalAnalysisEnum(),TransientAnalysisEnum());
		_printf_("read and create melting finite element model:\n");
		model->AddFormulation(fid,MeltingAnalysisEnum(),TransientAnalysisEnum());
	}

	/*recover parameters: */
	model->FindParam(&waitonlock,"waitonlock");
	model->FindParam(&qmu_analysis,"qmu_analysis");

	/*initialize inputs: nodal fields are pulled out of the prognostic
	 * formulation and handed to the core via a ParameterInputs set. */
	_printf_("initialize inputs:\n");
	model->FindParam(&u_g,NULL,NULL,"u_g",PrognosticAnalysisEnum());
	model->FindParam(&m_g,NULL,NULL,"m_g",PrognosticAnalysisEnum());
	model->FindParam(&a_g,NULL,NULL,"a_g",PrognosticAnalysisEnum());
	model->FindParam(&numberofnodes,"numberofnodes");
	model->FindParam(&dt,"dt");
	model->FindParam(&yts,"yts");

	inputs=new ParameterInputs;
	inputs->Add("velocity",u_g,3,numberofnodes);
	inputs->Add("melting",m_g,1,numberofnodes);
	inputs->Add("accumulation",a_g,1,numberofnodes);
	inputs->Add("dt",dt*yts); /* dt is converted from years to seconds here */

	_printf_("initialize results:\n");
	results=new DataSet(ResultsEnum());
	MPI_Barrier(MPI_COMM_WORLD); finish_init=MPI_Wtime();

	/*are we running the solution sequence, or a qmu wrapper around it? : */
	if(!qmu_analysis){

		/*run transient analysis directly: */
		_printf_("call computational core:\n");
		MPI_Barrier(MPI_COMM_WORLD); start_core=MPI_Wtime( );
		transient_core(results,model,inputs);
		MPI_Barrier(MPI_COMM_WORLD); finish_core=MPI_Wtime( );

		/*Add analysis_type to results: */
		result=new Result(results->Size()+1,0,1,"analysis_type","transient");
		results->AddObject(result);

		_printf_("process results:\n");
		ProcessResults(&processed_results,results,model,TransientAnalysisEnum());

		_printf_("write results to disk:\n");
		OutputResults(processed_results,outputfilename);
	}
	else{

		/*run qmu analysis: Dakota drives repeated calls into the transient
		 * core; only available when compiled with Dakota support. */
		_printf_("calling qmu analysis on transient core:\n");

		#ifdef _HAVE_DAKOTA_
		MPI_Barrier(MPI_COMM_WORLD); start_core=MPI_Wtime( );
		Qmux(model,inputs,TransientAnalysisEnum(),NoneAnalysisEnum());
		MPI_Barrier(MPI_COMM_WORLD); finish_core=MPI_Wtime( );
		#else
		throw ErrorException(__FUNCT__," Dakota not present, cannot do qmu!");
		#endif
	}

	/*signal completion to whoever launched us, if requested: */
	if (waitonlock>0){
		_printf_("write lock file:\n");
		WriteLockFile(lockname);
	}

	/*Free ressources:*/
	delete results;
	delete processed_results;
	delete model;
	delete inputs;
	xfree((void**)&u_g);
	xfree((void**)&m_g);
	xfree((void**)&a_g);

	/*Get finish time and close*/
	MPI_Barrier(MPI_COMM_WORLD); finish = MPI_Wtime( );
	_printf_("\n %-34s %f seconds \n","Model initialization elapsed time:",finish_init-start_init);
	_printf_(" %-34s %f seconds \n","Core solution elapsed time:",finish_core-start_core);
	_printf_(" %-34s %f seconds\n\n","Total elapsed time:",finish-start);

	_printf_("closing MPI and Petsc\n");
	PetscFinalize();

	/*end module: */
	MODULEEND();

	return 0; //unix success return;
}