/*!\file:  surfaceslope.cpp
 * \brief: surfaceslope computation solution
 */

#ifdef HAVE_CONFIG_H
	#include "config.h"
#else
#error "Cannot compile without HAVE_CONFIG_H symbol! run configure first!"
#endif

#include "../objects/objects.h"
#include "../shared/shared.h"
#include "../Container/Container.h"
#include "../EnumDefinitions/EnumDefinitions.h"
#include "../include/include.h"
#include "../modules/modules.h"
#include "./solutions.h"

int main(int argc,char** argv){

	/*I/O: */
	FILE* fid=NULL;
	char* inputfilename=NULL;
	char* outputfilename=NULL;
	char* lockname=NULL;
	bool  qmu_analysis=false;

	/*FemModel: */
	FemModel* femmodel=NULL;

	/*Results: */
	bool waitonlock=false;

	/*time*/
	double start, finish;
	double start_core, finish_core;
	double start_init, finish_init;

	/*analyses: this solution runs a single, statically defined analysis type*/
	int analyses[1]={SurfaceSlopeAnalysisEnum};
	int solution_type=SurfaceSlopeAnalysisEnum;

	MODULEBOOT();

	#if !defined(_PARALLEL_) || (defined(_PARALLEL_) && !defined(_HAVE_PETSC_))
	ISSMERROR(" parallel executable was compiled without support of parallel libraries!");
	#endif

	/*Initialize Petsc and get start time*/
	PetscInitialize(&argc,&argv,(char*)0,"");
	MPI_Barrier(MPI_COMM_WORLD); start=MPI_Wtime();

	/*Size and rank: */
	MPI_Comm_rank(MPI_COMM_WORLD,&my_rank);
	MPI_Comm_size(MPI_COMM_WORLD,&num_procs);

	/*Recover file names from the command line: */
	_printf_("recover input file name, output file name and lock file name:\n");
	inputfilename=argv[2];
	outputfilename=argv[3];
	lockname=argv[4];

	/*Initialize femmodel structure: */
	MPI_Barrier(MPI_COMM_WORLD); start_init=MPI_Wtime();

	/*Open handle to data on disk: */
	fid=pfopen(inputfilename,"rb");

	_printf_("create finite element model, using analyses types statically defined above:\n");
	femmodel=new FemModel(fid,solution_type,analyses,1);

	/*add outputfilename in parameters: */
	femmodel->parameters->AddObject(new StringParam(OutputFileNameEnum,outputfilename));

	/*get parameters: */
	femmodel->parameters->FindParam(&qmu_analysis,QmuAnalysisEnum);
	femmodel->parameters->FindParam(&waitonlock,WaitOnLockEnum);

	MPI_Barrier(MPI_COMM_WORLD); finish_init=MPI_Wtime();

	/*are we running the solution sequence, or a qmu wrapper around it? : */
	if(!qmu_analysis){

		/*run the computational core and time it: */
		_printf_("call computational core:\n");
		MPI_Barrier(MPI_COMM_WORLD); start_core=MPI_Wtime();
		surfaceslope_core(femmodel);
		MPI_Barrier(MPI_COMM_WORLD); finish_core=MPI_Wtime();

		_printf_("write results to disk:\n");
		OutputResultsx(femmodel->elements,femmodel->nodes,femmodel->vertices,femmodel->loads,femmodel->materials,femmodel->parameters,femmodel->results);
	}
	else{
		/*run qmu analysis: */
		_printf_("calling qmu analysis on surface slope core:\n");

		#ifdef _HAVE_DAKOTA_
		MPI_Barrier(MPI_COMM_WORLD); start_core=MPI_Wtime();
		Qmux(femmodel,SurfaceSlopeAnalysisEnum,NoneAnalysisEnum);
		MPI_Barrier(MPI_COMM_WORLD); finish_core=MPI_Wtime();
		#else
		ISSMERROR(" Dakota not present, cannot do qmu!");
		#endif
	}

	if(waitonlock){
		_printf_("write lock file:\n");
		WriteLockFile(lockname);
	}

	/*Free resources: */
	delete femmodel;

	/*Get finish time and close*/
	MPI_Barrier(MPI_COMM_WORLD); finish=MPI_Wtime();
	_printf_("\n %-34s %f seconds\n","FemModel initialization elapsed time:",finish_init-start_init);
	_printf_(" %-34s %f seconds\n","Core solution elapsed time:",finish_core-start_core);
	_printf_("\n %s %i hrs %i min %i sec\n\n","Total elapsed time:",int((finish-start)/3600),int(int(finish-start)%3600/60),int(finish-start)%60);

	_printf_("closing MPI and Petsc\n");
	PetscFinalize();

	/*end module: */
	MODULEEND();

	return 0; //unix success return
}