Index: /issm/trunk/src/c/Makefile.am
===================================================================
--- /issm/trunk/src/c/Makefile.am	(revision 666)
+++ /issm/trunk/src/c/Makefile.am	(revision 667)
@@ -546,7 +546,9 @@
 					./parallel/SpawnCore.cpp\
 					./parallel/ProcessResults.cpp\
+					./parallel/prognostic_core.cpp\
 					./parallel/qmu.cpp\
 					./parallel/DakotaResponses.cpp\
-					./parallel/OutputResults.cpp
+					./parallel/OutputResults.cpp\
+					./parallel/OutputControl.cpp
 
 libpISSM_a_CXXFLAGS = -fPIC -D_PARALLEL_   -D_C_
Index: /issm/trunk/src/c/parallel/ProcessResults.cpp
===================================================================
--- /issm/trunk/src/c/parallel/ProcessResults.cpp	(revision 666)
+++ /issm/trunk/src/c/parallel/ProcessResults.cpp	(revision 667)
@@ -33,5 +33,5 @@
 	DataSet* newresults=NULL;
 
-	/*fem models: */
+	/*fem diagnostic models: */
 	FemModel* fem_dh=NULL;
 	FemModel* fem_dv=NULL;
@@ -39,4 +39,7 @@
 	FemModel* fem_ds=NULL;
 	FemModel* fem_sl=NULL;
+
+	/*fem prognostic models: */
+	FemModel* fem_p=NULL;
 
 	int ishutter;
@@ -57,4 +60,8 @@
 	double  yts;
 
+	Vec     h_g=NULL;
+	double* h_g_serial=NULL;
+	double* thickness=NULL;
+
 	int numberofnodes;
 
@@ -80,4 +87,7 @@
 	}
 
+	if(analysis_type==PrognosticAnalysisEnum()){
+		fem_p=fems+0;
+	}
 	
 
@@ -185,4 +195,25 @@
 			xfree((void**)&partition);
 		}
+		else if(strcmp(result->GetFieldName(),"h_g")==0){
+			/*easy, h_g is of size numberofnodes, on 1 dof, just repartition: */
+			result->GetField(&h_g);
+			VecToMPISerial(&h_g_serial,h_g);
+			fem_p->parameters->FindParam((void*)&numberofnodes,"numberofnodes");
+			VecToMPISerial(&partition,fem_p->partition);
+
+			thickness=(double*)xmalloc(numberofnodes*sizeof(double));
+
+			for(i=0;i<numberofnodes;i++){
+				thickness[i]=h_g_serial[(int)partition[i]];
+			}
+			
+			/*Ok, add thickness to newresults: */
+			newresult=new Result(newresults->Size()+1,result->GetTime(),result->GetStep(),"thickness",thickness,numberofnodes);
+			newresults->AddObject(newresult);
+
+			/*do some cleanup: */
+			xfree((void**)&h_g_serial);
+			xfree((void**)&partition);
+		}
 	}
 
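The new "h_g" branch follows the same pattern as the existing result fields: serialize the distributed PETSc vector with VecToMPISerial, then undo the node partitioning so the output is ordered by node number. Below is a minimal standalone sketch of just that reordering step; plain arrays and made-up values stand in for the PETSc vector and for the partition that the real code obtains from fem_p.

#include <cstdio>
#include <vector>

int main(){
	/*serialized result as it would look after VecToMPISerial (toy values)*/
	std::vector<double> h_g_serial={10.0,20.0,30.0,40.0};
	/*partition[i] = position of node i in the partitioned vector*/
	std::vector<double> partition={2,0,3,1};
	int numberofnodes=(int)partition.size();

	/*reorder into node numbering, as done for "thickness" above*/
	std::vector<double> thickness(numberofnodes);
	for(int i=0;i<numberofnodes;i++) thickness[i]=h_g_serial[(int)partition[i]];

	for(int i=0;i<numberofnodes;i++) printf("node %i: %g\n",i,thickness[i]);
	return 0;
}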
Index: /issm/trunk/src/c/parallel/control.cpp
===================================================================
--- /issm/trunk/src/c/parallel/control.cpp	(revision 666)
+++ /issm/trunk/src/c/parallel/control.cpp	(revision 667)
@@ -158,5 +158,5 @@
 			inputs->Add("fit",fit[n]);
 			diagnostic_core_nonlinear(&u_g,NULL,NULL,&femmodel,inputs,analysis_type,sub_analysis_type);
-			//OutputControl(u_g,p_g,J,nsteps,femmodel.partition,outputfilename,femmodel.nodesets);
+			OutputControl(u_g,p_g,J,nsteps,&femmodel,outputfilename);
 			_printf_("%s\n","      done.");
 		}
@@ -179,5 +179,5 @@
 	
 	_printf_("%s\n","      saving final results...");
-	//OutputControl(u_g,p_g,J,nsteps,femmodel.partition,outputfilename,femmodel.nodesets);
+	OutputControl(u_g,p_g,J,nsteps,&femmodel,outputfilename);
 	_printf_("%s\n","      done.");
 
@@ -188,7 +188,4 @@
 			
 	_printf_("closing MPI and Petsc\n");
-	MPI_Barrier(MPI_COMM_WORLD);
-
-	/*Close MPI libraries: */
 	PetscFinalize(); 
 
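The OutputControl call replaces the older commented-out version that passed femmodel.partition and femmodel.nodesets explicitly; the new signature (declared in parallel.h below, implemented in the new OutputControl.cpp, which is not part of this changeset) takes the whole FemModel instead. A hypothetical outline of that shape, only to illustrate the intent of the simplified interface:

#undef __FUNCT__
#define __FUNCT__ "OutputControl"

void OutputControl(Vec u_g,double* p_g,double* J,int nsteps,FemModel* fem,char* outputfilename){

	/*previously passed in explicitly, now presumably read off the model: */
	/*   fem->partition   */
	/*   fem->nodesets    */

	/*... write u_g, p_g and the J misfit history over nsteps steps to outputfilename ...*/
}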
Index: /issm/trunk/src/c/parallel/diagnostic_core.cpp
===================================================================
--- /issm/trunk/src/c/parallel/diagnostic_core.cpp	(revision 666)
+++ /issm/trunk/src/c/parallel/diagnostic_core.cpp	(revision 667)
@@ -4,5 +4,5 @@
 
 #undef __FUNCT__ 
-#define __FUNCT__ "cielodiagnostic_core"
+#define __FUNCT__ "diagnostic_core"
 
 #include "../toolkits/toolkits.h"
Index: /issm/trunk/src/c/parallel/diagnostic_core_nonlinear.cpp
===================================================================
--- /issm/trunk/src/c/parallel/diagnostic_core_nonlinear.cpp	(revision 666)
+++ /issm/trunk/src/c/parallel/diagnostic_core_nonlinear.cpp	(revision 667)
@@ -4,5 +4,5 @@
 
 #undef __FUNCT__ 
-#define __FUNCT__ "cielodiagnostic_core_nonlinear"
+#define __FUNCT__ "diagnostic_core_nonlinear"
 
 #include "../toolkits/toolkits.h"
@@ -161,5 +161,5 @@
 	}
 
-	//more output might be needed, when running in cielocontrol.c
+	//more output might be needed, when running in control.c
 	if(pKff0){
 
Index: /issm/trunk/src/c/parallel/parallel.h
===================================================================
--- /issm/trunk/src/c/parallel/parallel.h	(revision 666)
+++ /issm/trunk/src/c/parallel/parallel.h	(revision 667)
@@ -12,4 +12,5 @@
 
 void diagnostic_core(DataSet* results,FemModel* fems, ParameterInputs* inputs);
+void prognostic_core(DataSet* results,FemModel* fems, ParameterInputs* inputs);
 
 void thermal_core(DataSet* results,FemModel* fems, ParameterInputs* inputs);
@@ -32,4 +33,5 @@
 //int ParameterUpdate(double* search_vector,int step, WorkspaceParams* workspaceparams,BatchParams* batchparams);
 void OutputResults(DataSet* results,char* filename);
+void OutputControl(Vec u_g,double* p_g,double* J,int nsteps,FemModel* fem,char* outputfilename);
 void WriteLockFile(char* filename);
 
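parallel.h now declares prognostic_core with the same signature as diagnostic_core and thermal_core. The new prognostic_core.cpp added to the Makefile is not shown in this changeset; judging from the calls removed from prognostic.cpp below, it presumably wraps the linear prognostic solve and the thickness extrusion that used to sit inline in the driver. A hypothetical skeleton under that assumption (result bookkeeping omitted; headers follow the pattern of the other cores):

#undef __FUNCT__
#define __FUNCT__ "prognostic_core"

#include "../toolkits/toolkits.h"
#include "./parallel.h"

void prognostic_core(DataSet* results,FemModel* fems,ParameterInputs* inputs){

	Vec h_g=NULL;
	FemModel* fem_p=fems+0;

	/*solve the (linear) prognostic equation for the new thickness: */
	diagnostic_core_linear(&h_g,fem_p,inputs,PrognosticAnalysisEnum(),NoneAnalysisEnum());

	/*extrude the computed thickness on all layers: */
	ThicknessExtrudex(h_g,fem_p->elements,fem_p->nodes,fem_p->loads,fem_p->materials);

	/*hand h_g back through results as a Result object (see the "h_g" branch in ProcessResults)*/
}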
Index: /issm/trunk/src/c/parallel/prognostic.cpp
===================================================================
--- /issm/trunk/src/c/parallel/prognostic.cpp	(revision 666)
+++ /issm/trunk/src/c/parallel/prognostic.cpp	(revision 667)
@@ -23,8 +23,12 @@
 	char* outputfilename=NULL;
 	char* lockname=NULL;
+	char* qmuinname=NULL;
+	char* qmuoutname=NULL;
+	char* qmuerrname=NULL;
 	int   numberofnodes;
 	int waitonlock=0;
 
 	FemModel fem;
+
 	Vec h_g=NULL;
 	Vec u_g=NULL;
@@ -34,4 +38,8 @@
 	double* accumulation_g=NULL;
 	double  dt;
+	int     qmu_analysis;
+
+	/*Results: */
+	DataSet* results=NULL;
 
 	
@@ -55,4 +63,7 @@
 	outputfilename=argv[3];
 	lockname=argv[4];
+	qmuinname=argv[5];
+	qmuoutname=argv[6];
+	qmuerrname=argv[7];
 
 	/*Open handle to data on disk: */
@@ -70,11 +81,7 @@
 	fem.parameters->FindParam((void*)&numberofnodes,"numberofnodes");
 
-	_printf_("depth averaging velocity...");
-	u_g=SerialToVec(u_g_serial,numberofnodes*3); xfree((void**)&u_g_serial);//vx,vy and vz should be present at this point.
-	VelocityDepthAveragex( u_g, fem.elements,fem.nodes, fem.loads, fem.materials);
-
 	_printf_("initialize inputs:\n");
 	inputs=new ParameterInputs;
-	inputs->Add("velocity_average",u_g,3,numberofnodes);
+	inputs->Add("velocity",u_g_serial,3,numberofnodes);
 	inputs->Add("thickness",h_g_initial,1,numberofnodes);
 	inputs->Add("melting",melting_g,1,numberofnodes);
@@ -82,14 +89,30 @@
 	inputs->Add("dt",dt);
 
-	/*lighten up on parameters : to be done */
+	/*are we running the solution sequence, or a qmu wrapper around it? : */
+	fem.parameters->FindParam((void*)&qmu_analysis,"qmu_analysis");
+	if(!qmu_analysis){
 
-	_printf_("call computational core:\n");
-	diagnostic_core_linear(&h_g,&fem,inputs,PrognosticAnalysisEnum(),NoneAnalysisEnum());
+		/*run prognostic analysis: */
+		_printf_("call computational core:\n");
+		prognostic_core(results,&fem,inputs);
 
-	_printf_("extrude computed thickness on all layers:\n");
-	ThicknessExtrudex( h_g, fem.elements,fem.nodes, fem.loads, fem.materials);
+	}
+	else{
+		/*run qmu analysis: */
+		_printf_("calling qmu analysis on prognostic core:\n");
+	
+		#ifdef _HAVE_DAKOTA_ 
+		qmu(qmuinname,qmuoutname,qmuerrname,&fem,inputs,PrognosticAnalysisEnum(),NoneAnalysisEnum());
+	 	#else
+		throw ErrorException(__FUNCT__," Dakota not present, cannot do qmu!");
+		#endif
+	}
 
+
+	_printf_("process results:\n");
+	ProcessResults(&results,&fem,PrognosticAnalysisEnum());
+	
 	_printf_("write results to disk:\n");
-	//OutputPrognostic(h_g,&fem,outputfilename);
+	OutputResults(results,outputfilename);
 
 	_printf_("write lock file:\n");
@@ -100,9 +123,5 @@
 		
 	_printf_("closing MPI and Petsc\n");
-	MPI_Barrier(MPI_COMM_WORLD);
-
-	/*Close MPI libraries: */
 	PetscFinalize(); 
-
 
 	/*end module: */
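prognostic.cpp (and thermal.cpp below) now branch on the qmu_analysis parameter: either the solution core runs directly, or the Dakota-driven qmu() wrapper runs around it, with a compile-time guard so that builds without Dakota fail with a clear error instead of an unresolved symbol. A self-contained sketch of that guard pattern; std::runtime_error stands in for ISSM's ErrorException, and _HAVE_DAKOTA_ is simply whatever feature macro the build defines:

#include <cstdio>
#include <stdexcept>

void run_core(bool qmu_analysis){
	if(!qmu_analysis){
		printf("call computational core directly\n");
		return;
	}
	#ifdef _HAVE_DAKOTA_
	printf("call the qmu() wrapper around the core\n");
	#else
	throw std::runtime_error("Dakota not present, cannot do qmu!");
	#endif
}

int main(){
	try{ run_core(true); }
	catch(const std::exception& e){ printf("error: %s\n",e.what()); }
	return 0;
}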
Index: /issm/trunk/src/c/parallel/thermal.cpp
===================================================================
--- /issm/trunk/src/c/parallel/thermal.cpp	(revision 666)
+++ /issm/trunk/src/c/parallel/thermal.cpp	(revision 667)
@@ -24,4 +24,8 @@
 	char* outputfilename=NULL;
 	char* lockname=NULL;
+	char* qmuinname=NULL;
+	char* qmuoutname=NULL;
+	char* qmuerrname=NULL;
+	int   qmu_analysis=0;
 	int   numberofnodes;
 
@@ -57,4 +61,7 @@
 	outputfilename=argv[3];
 	lockname=argv[4];
+	qmuinname=argv[5];
+	qmuoutname=argv[6];
+	qmuerrname=argv[7];
 
 	/*Open handle to data on disk: */
@@ -89,11 +96,28 @@
 	femmodels[1].parameters->DeleteObject((Object*)param);
 
+	/*are we running the solution sequence, or a qmu wrapper around it? : */
+	femmodels[0].parameters->FindParam((void*)&qmu_analysis,"qmu_analysis");
+	if(!qmu_analysis){
+
+		/*run thermal analysis: */
+		_printf_("call computational core:\n");
+		thermal_core(results,femmodels,inputs);
+
+	}
+	else{
+		/*run qmu analysis: */
+		_printf_("calling qmu analysis on thermal core:\n");
 	
-	_printf_("call computational core:\n");
-	thermal_core(results,femmodels,inputs);
+		#ifdef _HAVE_DAKOTA_ 
+		qmu(qmuinname,qmuoutname,qmuerrname,&femmodels[0],inputs,ThermalAnalysisEnum(),NoneAnalysisEnum());
+	 	#else
+		throw ErrorException(__FUNCT__," Dakota not present, cannot do qmu!");
+		#endif
+	}
 
+	
 	_printf_("process results:\n");
-	ProcessResults(&results,&femmodels[0],ThermalAnalysisEnum());
-
+	ProcessResults(&results,&femmodels[0],DiagnosticAnalysisEnum());
+	
 	_printf_("write results to disk:\n");
 	OutputResults(results,outputfilename);
@@ -107,7 +131,4 @@
 		
 	_printf_("closing MPI and Petsc\n");
-	MPI_Barrier(MPI_COMM_WORLD);
-
-	/*Close MPI libraries: */
 	PetscFinalize(); 
 

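Both drivers now read three additional command-line arguments, the qmu input, output and error file names, at argv[5] through argv[7]. The hunks shown do not include an argc check for the longer argument list; if none exists elsewhere, a guard along these lines (hypothetical, not part of this changeset) keeps the drivers from reading past the end of argv:

#include <cstdio>
#include <cstdlib>

int main(int argc,char** argv){
	/*with the qmu file names at argv[5..7], at least 8 entries are expected*/
	if(argc<8){
		fprintf(stderr,"usage: %s ... outputfile lockfile qmuinfile qmuoutfile qmuerrfile\n",argv[0]);
		return EXIT_FAILURE;
	}
	char* qmuinname =argv[5];
	char* qmuoutname=argv[6];
	char* qmuerrname=argv[7];
	printf("qmu files: %s %s %s\n",qmuinname,qmuoutname,qmuerrname);
	return 0;
}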