Index: /issm/trunk-jpl/src/c/Makefile.am
===================================================================
--- /issm/trunk-jpl/src/c/Makefile.am	(revision 19631)
+++ /issm/trunk-jpl/src/c/Makefile.am	(revision 19632)
@@ -263,5 +263,7 @@
 #DAKOTA sources  {{{
 if DAKOTA
-issm_sources +=  ./modules/InputUpdateFromDakotax/InputUpdateFromDakotax.cpp\
+issm_sources +=       ./classes/Dakota/IssmDirectApplicInterface.h\
+					  ./classes/Dakota/IssmParallelDirectApplicInterface.h\
+					  ./modules/InputUpdateFromDakotax/InputUpdateFromDakotax.cpp\
 					  ./modules/InputUpdateFromVectorDakotax/InputUpdateFromVectorDakotax.cpp\
 					  ./modules/InputUpdateFromMatrixDakotax/InputUpdateFromMatrixDakotax.cpp\
@@ -269,6 +271,5 @@
 					  ./modules/ModelProcessorx/Dakota/CreateParametersDakota.cpp\
 					  ./modules/ModelProcessorx/Dakota/UpdateElementsAndMaterialsDakota.cpp\
-					  ./cores/dakota_core.cpp\
-					  ./cores/DakotaSpawnCore.cpp
+					  ./cores/dakota_core.cpp
 endif
 #}}}
@@ -659,4 +660,12 @@
 kriging_CXXFLAGS= -fPIC $(CXXFLAGS) $(CXXOPTFLAGS) $(COPTFLAGS) 
 endif
+
+if DAKOTA
+bin_PROGRAMS += issm_dakota
+issm_dakota_SOURCES = main/issm_dakota.cpp
+issm_dakota_CXXFLAGS= -fPIC $(CXXFLAGS) $(CXXOPTFLAGS) $(COPTFLAGS) 
+issm_dakota_LDADD= $(LDADD)
+endif
+
 #}}}
 #Automatic differentiation (must be done at the end) {{{
Index: /issm/trunk-jpl/src/c/classes/Dakota/IssmDirectApplicInterface.h
===================================================================
--- /issm/trunk-jpl/src/c/classes/Dakota/IssmDirectApplicInterface.h	(revision 19632)
+++ /issm/trunk-jpl/src/c/classes/Dakota/IssmDirectApplicInterface.h	(revision 19632)
@@ -0,0 +1,133 @@
+/*!\file:  IssmDirectApplicInterface.* This code is only valid for Dakota versions lower than 5!
+ *
+ * \brief: derived DirectApplicInterface class declaration and implementation, tailored to the ISSM cores. 
+ * This class is registered into the interface database of Dakota, and is used to directly call ISSM cores 
+ * from Dakota. 
+ *
+ * This routine helps running ISSM and Dakota in library mode, for Dakota versions that are <=5, and which 
+ * do not really support parallelism. In library mode, Dakota does not 
+ * run as an executable. Its capabilities are linked into the ISSM software, and ISSM calls dakota routines 
+ * directly from the dakota library. dakota_core.cpp is the code that is in charge of calling those routines. 
+ *
+ * Prior to versions 6 and more, Dakota had its own way of running in parallel (for embarrassingly parallel jobs). 
+ * We do not want that, as ISSM knows exactly how to run "really parallel" jobs that use all CPUS. To bypass Dakota's parallelism, 
+ * we overloaded the constructor for the parallel library (see the Dakota patch in the externalpackages/dakota
+ * directory). This overloaded constructor fires up Dakota serially on CPU 0 only! We take care of broadcasting 
+ * to the other CPUS, hence ISSM is running in parallel, and Dakota serially on CPU0. 
+ *
+ * Now, how does CPU 0 drive all other CPUS to carry out sensitivity analyses? By synchronizing its call to 
+ * our ISSM cores (stressbalance_core, thermal_core, transient_core, etc ...) on CPU 0 with all other CPUS. 
+ * This explains the structure of dakota_core.cpp, where cpu 0 runs Dakota, the Dakota plugin fires up DakotaSpawnCore.cpp, 
+ * while the other CPUS are waiting for a broadcast from CPU0, once they get it, they also fire up 
+ * DakotaSpawnCore. In the end, DakotaSpawnCore is fired up on all CPUS, with CPU0 having Dakota inputs, that it will 
+ * broacast to other CPUS. 
+ *
+ * Now, how does dakota call the DakotaSpawnCore routine? The DakotaSpawnCore is embedded into the IssmDirectApplicInterface object 
+ * which is derived from the Direct Interface Dakota object. This is the only way to run Dakota in library 
+ * mode (see their developer guide for more info). Dakota registers the IssmDirectApplicInterface object into its own 
+ * database, and calls on the embedded DakotaSpawnCore from CPU0. 
+ *
+ */ 
+
+/*Issm Configuration: {{{*/
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#else
+#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
+#endif 
+/*}}}*/
+
+#if defined(_HAVE_DAKOTA_) && (_DAKOTA_MAJOR_ < 5 || (_DAKOTA_MAJOR_ == 5 && _DAKOTA_MINOR_ < 3))
+
+/*Dakota include files:{{{*/
+#include <DakotaInterface.H>
+#include <DakotaStrategy.H>
+#include <DakotaModel.H>
+#include <DirectApplicInterface.H>
+#include <DakotaResponse.H>
+#include <ParamResponsePair.H>
+#include <system_defs.h>
+#include <ProblemDescDB.H>
+#include <ParallelLibrary.H>
+/*}}}*/
+
+/*IssmDirectApplicInterface class */
+namespace SIM {
+	class IssmDirectApplicInterface: public Dakota::DirectApplicInterface{
+		public:
+			/*these fields are used by core solutions: */
+			void *femmodel;
+			int   counter;
+			/*Constructors/Destructors*/
+			IssmDirectApplicInterface(const Dakota::ProblemDescDB& problem_db,void* in_femmodel):Dakota::DirectApplicInterface(problem_db){/*{{{*/
+				femmodel = in_femmodel;
+				counter  = 0;
+			}/*}}}*/
+			~IssmDirectApplicInterface(){/*{{{*/
+				/* Virtual destructor handles referenceCount at Interface level. */ 
+			}/*}}}*/
+		protected:
+			/*execute the input filter portion of a direct evaluation invocation*/
+			//int derived_map_if(const Dakota::String& if_name);
+			/*execute an analysis code portion of a direct evaluation invocation*/
+			int derived_map_ac(const Dakota::String& driver){/*{{{*/
+
+				int i;
+				IssmDouble* variables=NULL;
+				char** variable_descriptors=NULL;
+				char*  variable_descriptor=NULL;
+				IssmDouble* responses=NULL;
+
+				/*increase counter: */
+				counter++;
+
+				/*Before launching analysis, we need to transfer the dakota inputs into Issm 
+				 *readable variables: */
+
+				/*First, the variables: */
+				variables=xNew<IssmDouble>(numACV);
+				for(i=0;i<numACV;i++){
+					variables[i]=xC[i];
+				}
+				/*The descriptors: */
+				variable_descriptors=xNew<char*>(numACV);
+				for(i=0;i<numACV;i++){
+					std::string label=xCLabels[i];
+					variable_descriptor=xNew<char>(strlen(label.c_str())+1);
+					memcpy(variable_descriptor,label.c_str(),(strlen(label.c_str())+1)*sizeof(char));
+
+					variable_descriptors[i]=variable_descriptor;
+				}
+
+				/*Initialize responses: */
+				responses=xNewZeroInit<IssmDouble>(numFns);
+
+				/*run core solution: */
+				DakotaSpawnCore(responses,numFns, variables,variable_descriptors,numACV,femmodel,counter);
+
+				/*populate responses: */
+				for(i=0;i<numFns;i++){
+					fnVals[i]=responses[i];
+				}
+
+				/*Free resources:*/
+				xDelete<IssmDouble>(variables);
+				for(i=0;i<numACV;i++){
+					variable_descriptor=variable_descriptors[i];
+					xDelete<char>(variable_descriptor);
+				}
+				xDelete<char*>(variable_descriptors);
+				xDelete<IssmDouble>(responses);
+
+				return 0;
+			}/*}}}*/
+			/*execute the output filter portion of a direct evaluation invocation*/
+			//int derived_map_of(const Dakota::String& of_name);
+			/*add for issm: */
+			int GetCounter(){/*{{{*/
+				return counter;
+			}/*}}}*/
+		private:
+	};
+} 
+#endif
Index: /issm/trunk-jpl/src/c/classes/Dakota/IssmParallelDirectApplicInterface.h
===================================================================
--- /issm/trunk-jpl/src/c/classes/Dakota/IssmParallelDirectApplicInterface.h	(revision 19632)
+++ /issm/trunk-jpl/src/c/classes/Dakota/IssmParallelDirectApplicInterface.h	(revision 19632)
@@ -0,0 +1,136 @@
+/*!\file:  IssmParallelDirectApplicInterface.h. This code is only valid for Dakota versions higher than 6!
+ *
+ * \brief: derived ParallelDirectApplicInterface class declaration and implementation, tailored to ISSM.
+ * This class is registered into the interface database of Dakota, and is used to directly call ISSM cores 
+ * from Dakota. 
+ *
+ * This routine helps running ISSM and Dakota in library mode, for Dakota versions that are >=6, and that fully 
+ * support parallelism.  The setup is radically different than from version <6! Now, dakota runs the show more. 
+ * The reason is that dakota now controls the parallelism in a master/slave setup, and hands over to ISSM a  bunch 
+ * of slave communicators, which are then used to run our simulations. Because ISSM is now ESMF compliant, we can 
+ * use these communicators to create separate identical FemModel instances on each slave communicator! This allows 
+ * us to scale to large jobs (think 1000's of cpus), which we split into multiple sub-slave communicators, which 
+ * run the sampling (or forward different, local reliability, optimization you name it) simulations on each slave. 
+ * 
+ * This is all bootstraped from the main issm_dakota main, (see c/main directory), which is heavily inspired on the
+ * main found in the dakota/src/library_mode.cpp code. We also have to create an ISSM code that registers into the 
+ * dakota database, which is capable of running ISSM. This is derived from the Dakota class called 
+ * ParallelDirectApplicInterface. 
+ */ 
+#ifndef _ISSMPARALLELDIRECTAPPLICINTERFACE_
+#define _ISSMPARALLELDIRECTAPPLICINTERFACE_
+
+/*Issm Configuration: {{{*/
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#else
+#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
+#endif
+/*}}}*/
+
+#if defined(_HAVE_DAKOTA_) && _DAKOTA_MAJOR_ >= 6
+
+/*Dakota include files: {{{*/
+#include <DirectApplicInterface.hpp>
+/*}}}*/
+
+namespace SIM {
+	class IssmParallelDirectApplicInterface: public Dakota::DirectApplicInterface{
+      
+		public:
+			/*these fields are used by core solutions: */
+			void *femmodel;
+			
+			/*Constructors/Destructors{{{*/
+			IssmParallelDirectApplicInterface(const Dakota::ProblemDescDB& problem_db, const MPI_Comm& analysis_comm, void* in_femmodel):Dakota::DirectApplicInterface(problem_db){
+
+				#ifdef MPI_DEBUG
+				  // For testing purposes, output size/rank of the incoming analysis_comm
+				  int rank, size;
+				  MPI_Comm_rank(analysis_comm, &rank);
+				  MPI_Comm_size(analysis_comm, &size);
+				  Cout << "In SIM::ParallelDirectApplicInterface ctor, rank = " << rank
+					   << " size = " << size << std::endl;
+				 #endif // MPI_DEBUG
+
+				femmodel = in_femmodel;
+			}
+			~IssmParallelDirectApplicInterface(){
+			}
+			/*}}}*/
+		
+		protected:
+			
+			/// execute an analysis code portion of a direct evaluation invocation
+			int derived_map_ac(const Dakota::String& driver){/*{{{*/
+
+				#ifdef MPI_DEBUG
+					Cout << "analysis server " << analysisServerId << " invoking " << ac_name
+						 << " within SIM::ParallelDirectApplicInterface." << std::endl;
+				#endif // MPI_DEBUG
+
+				int i;
+				IssmDouble* variables=NULL;
+				char** variable_descriptors=NULL;
+				char*  variable_descriptor=NULL;
+				IssmDouble* responses=NULL;
+
+				/*Before launching analysis, we need to transfer the dakota inputs into Issm 
+				 *readable variables: */
+
+				/*First, the variables: */
+				variables=xNew<IssmDouble>(numACV);
+				for(i=0;i<numACV;i++){
+					variables[i]=xC[i];
+				}
+				/*The descriptors: */
+				variable_descriptors=xNew<char*>(numACV);
+				for(i=0;i<numACV;i++){
+					std::string label=xCLabels[i];
+					variable_descriptor=xNew<char>(strlen(label.c_str())+1);
+					memcpy(variable_descriptor,label.c_str(),(strlen(label.c_str())+1)*sizeof(char));
+
+					variable_descriptors[i]=variable_descriptor;
+				}
+
+				/*Initialize responses: */
+				responses=xNewZeroInit<IssmDouble>(numFns);
+
+				/*run core solution: */
+				//DakotaSpawnCore(responses,numFns, variables,variable_descriptors,numACV,femmodel);
+
+				/*populate responses: */
+				for(i=0;i<numFns;i++){
+					fnVals[i]=responses[i];
+				}
+
+				/*Free resources:*/
+				xDelete<IssmDouble>(variables);
+				for(i=0;i<numACV;i++){
+					variable_descriptor=variable_descriptors[i];
+					xDelete<char>(variable_descriptor);
+				}
+				xDelete<char*>(variable_descriptors);
+				xDelete<IssmDouble>(responses);
+
+				return 0;
+			}/*}}}*/
+
+			/// no-op hides base error; job batching occurs within wait_local_evaluations()
+			//void derived_map_asynch(const Dakota::ParamResponsePair& pair){};
+
+			/// evaluate the batch of jobs contained in prp_queue
+			//void wait_local_evaluations(Dakota::PRPQueue& prp_queue);
+
+			/// invokes wait_local_evaluations() (no special nowait support)
+			//void test_local_evaluations(Dakota::PRPQueue& prp_queue) { wait_local_evaluations(prp_queue); };
+
+			/// no-op hides default run-time error checks at DirectApplicInterface level
+			//void set_communicators_checks(int max_eval_concurrency){};
+
+		private:
+	};
+}
+/*}}}*/
+#endif
+#endif
Index: /issm/trunk-jpl/src/c/classes/classes.h
===================================================================
--- /issm/trunk-jpl/src/c/classes/classes.h	(revision 19631)
+++ /issm/trunk-jpl/src/c/classes/classes.h	(revision 19632)
@@ -113,4 +113,8 @@
 #include "./kriging/krigingobjects.h"
 
+/*dakota:*/
+#include "./Dakota/IssmDirectApplicInterface.h"
+#include "./Dakota/IssmParallelDirectApplicInterface.h"
+
 /*diverse: */
 #include "./Hook.h"
Index: /issm/trunk-jpl/src/c/cores/DakotaSpawnCore.cpp
===================================================================
--- /issm/trunk-jpl/src/c/cores/DakotaSpawnCore.cpp	(revision 19631)
+++ 	(revision )
@@ -1,191 +1,0 @@
-/*!\file:  DakotaSpawnCore.cpp
-
- * \brief: run core ISSM solution using Dakota inputs coming from CPU 0.
- * \sa qmu.cpp DakotaPlugin.cpp
- *
- * This routine needs to be understood simultaneously with qmu.cpp and DakotaPlugin. 
- * DakotaSpawnCoreParallel is called by all CPUS, with CPU 0 holding Dakota variable values, along 
- * with variable descriptors. 
- *
- * DakotaSpawnCoreParallel takes care of broadcasting the variables and their descriptors across the MPI 
- * ring. Once this is done, we use the variables to modify the inputs for the solution core. 
- * For ex, if "rho_ice" is provided, for ex 920, we include "rho_ice" in the inputs, then 
- * call the core with the modified inputs. This is the way we get Dakota to explore the parameter 
- * spce of the core. 
- *
- * Once the core is called, we process the results of the core, and using the processed results, 
- * we compute response functions. The responses are computed on all CPUS, but they are targeted 
- * for CPU 0, which will get these values back to the Dakota engine. 
- *
- */ 
-
-/*Includes and prototypes: {{{*/
-#ifdef HAVE_CONFIG_H
-	#include <config.h>
-#else
-#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
-#endif
-
-#include "./cores.h"
-#include "../classes/classes.h"
-#include "../shared/shared.h"
-#include "../modules/modules.h"
-
-void DakotaMPI_Bcast(double** pvariables, char*** pvariables_descriptors,int* pnumvariables, int* pnumresponses);
-void DakotaFree(double** pvariables,char*** pvariables_descriptors,char*** presponses_descriptors,int numvariables,int numresponses);
-/*}}}*/
-
-/*Notice the d_, which prefixes anything that is being provided to us by the Dakota pluggin. Careful. some things are ours, some are dakotas!: */
-int DakotaSpawnCore(double* d_responses, int d_numresponses, double* d_variables, char** d_variables_descriptors,int d_numvariables, void* void_femmodel,int counter){
-
-	char     **responses_descriptors    = NULL;      //these are our! there are only numresponsedescriptors of them, not d_numresponses!!!
-	int        numresponsedescriptors;
-	int        solution_type;
-	bool       control_analysis         = false;
-	void     (*solutioncore)(FemModel*) = NULL;
-	FemModel  *femmodel                 = NULL;
-	bool       nodakotacore             = true;
-
-	/*If counter==-1 on cpu0, it means that the dakota runs are done. In which case, bail out and return 0: */
-	ISSM_MPI_Bcast(&counter,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 
-	if(counter==-1)return 0;
-
-	/*cast void_femmodel to FemModel, and at the same time, make a copy, so we start this new core run for this specific sample 
-	 *with a brand new copy of the model, which has not been tempered with by previous dakota runs: */
-	femmodel=(reinterpret_cast<FemModel*>(void_femmodel))->copy();
-
-	/*retrieve parameters: */
-	femmodel->parameters->FindParam(&responses_descriptors,&numresponsedescriptors,QmuResponsedescriptorsEnum);
-	femmodel->parameters->FindParam(&solution_type,SolutionTypeEnum);
-	femmodel->parameters->FindParam(&control_analysis,InversionIscontrolEnum);
-
-	if(VerboseQmu()) _printf0_("qmu iteration: " << counter << "\n");
-
-	/* only cpu 0, running dakota is providing us with variables and variables_descriptors and numresponses: broadcast onto other cpus: */
-	DakotaMPI_Bcast(&d_variables,&d_variables_descriptors,&d_numvariables,&d_numresponses);
-
-	/*Modify core inputs in objects contained in femmodel, to reflect the dakota variables inputs: */
-	InputUpdateFromDakotax(femmodel,d_variables,d_variables_descriptors,d_numvariables);
-
-	/*Determine solution sequence: */
-	if(VerboseQmu()) _printf0_("Starting " << EnumToStringx(solution_type) << " core:\n");
-	WrapperCorePointerFromSolutionEnum(&solutioncore,femmodel->parameters,solution_type,nodakotacore);
-
-	/*Run the core solution sequence: */
-	solutioncore(femmodel);
-
-	/*compute responses: */
-	if(VerboseQmu()) _printf0_("compute dakota responses:\n");
-	femmodel->DakotaResponsesx(d_responses,responses_descriptors,numresponsedescriptors,d_numresponses);
-
-	/*Free ressources:*/
-	DakotaFree(&d_variables,&d_variables_descriptors,&responses_descriptors, d_numvariables, numresponsedescriptors);
-
-	/*Avoid leaks here: */
-	delete femmodel;
-
-	return 1; //this is critical! do not return 0, otherwise, dakota_core will stop running!
-}
-
-void DakotaMPI_Bcast(double** pvariables, char*** pvariables_descriptors,int* pnumvariables, int* pnumresponses){ /*{{{*/
-
-	/* * \brief: broadcast variables_descriptors, variables, numvariables and numresponses
-	 * from cpu 0 to all other cpus.
-	 */ 
-
-	int i;
-	int my_rank;
-
-	/*inputs and outputs: */
-	double* variables=NULL;
-	char**  variables_descriptors=NULL;
-	int     numvariables;
-	int     numresponses;
-
-	/*intermediary: */
-	char* string=NULL;
-	int   string_length;
-
-	/*recover my_rank:*/
-	my_rank=IssmComm::GetRank();
-
-	/*recover inputs from pointers: */
-	variables=*pvariables;
-	variables_descriptors=*pvariables_descriptors;
-	numvariables=*pnumvariables;
-	numresponses=*pnumresponses;
-
-	/*numvariables: */
-	ISSM_MPI_Bcast(&numvariables,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 
-
-	/*variables:*/
-	if(my_rank!=0)variables=xNew<double>(numvariables);
-	ISSM_MPI_Bcast(variables,numvariables,MPI_DOUBLE,0,IssmComm::GetComm()); 
-
-	/*variables_descriptors: */
-	if(my_rank!=0){
-		variables_descriptors=xNew<char*>(numvariables);
-	}
-	for(i=0;i<numvariables;i++){
-		if(my_rank==0){
-			string=variables_descriptors[i];
-			string_length=(strlen(string)+1)*sizeof(char);
-		}
-		ISSM_MPI_Bcast(&string_length,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 
-		if(my_rank!=0)string=xNew<char>(string_length);
-		ISSM_MPI_Bcast(string,string_length,ISSM_MPI_CHAR,0,IssmComm::GetComm()); 
-		if(my_rank!=0)variables_descriptors[i]=string;
-	}
-
-	/*numresponses: */
-	ISSM_MPI_Bcast(&numresponses,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 
-
-	/*Assign output pointers:*/
-	*pnumvariables=numvariables;
-	*pvariables=variables;
-	*pvariables_descriptors=variables_descriptors;
-	*pnumresponses=numresponses;
-} /*}}}*/
-void DakotaFree(double** pvariables,char*** pvariables_descriptors,char*** presponses_descriptors,int numvariables,int numresponses){ /*{{{*/
-
-	/*\brief DakotaFree: free allocations on other cpus, not done by Dakota.*/
-
-	int i;
-	int my_rank;
-
-	double  *variables             = NULL;
-	char   **variables_descriptors = NULL;
-	char   **responses_descriptors = NULL;
-	char    *string                = NULL;
-
-	/*recover pointers: */
-	variables=*pvariables;
-	variables_descriptors=*pvariables_descriptors;
-	responses_descriptors=*presponses_descriptors;
-
-	/*recover my_rank:*/
-	my_rank=IssmComm::GetRank();
-
-	/*Free variables and variables_descriptors only on cpu !=0*/
-	if(my_rank!=0){
-		xDelete<double>(variables);
-		for(i=0;i<numvariables;i++){
-			string=variables_descriptors[i];
-			xDelete<char>(string);
-		}
-		xDelete<char*>(variables_descriptors);
-	}
-
-	//responses descriptors on every cpu
-	for(i=0;i<numresponses;i++){
-		string=responses_descriptors[i];
-		xDelete<char>(string);
-	}
-	//rest of dynamic allocations.
-	xDelete<char*>(responses_descriptors);
-
-	/*Assign output pointers:*/
-	*pvariables=variables;
-	*pvariables_descriptors=variables_descriptors;
-	*presponses_descriptors=responses_descriptors;
-} /*}}}*/
Index: /issm/trunk-jpl/src/c/cores/DakotaSpawnCore.h
===================================================================
--- /issm/trunk-jpl/src/c/cores/DakotaSpawnCore.h	(revision 19631)
+++ 	(revision )
@@ -1,4 +1,0 @@
-#ifndef _DAKOTA_SPAWN_CORE_
-#define _DAKOTA_SPAWN_CORE_
-int  DakotaSpawnCore(double* responses, int numresponses, double* variables, char** variables_descriptors,int numvariables, void* femmodel,int counter);
-#endif
Index: /issm/trunk-jpl/src/c/cores/dakota_core.cpp
===================================================================
--- /issm/trunk-jpl/src/c/cores/dakota_core.cpp	(revision 19631)
+++ /issm/trunk-jpl/src/c/cores/dakota_core.cpp	(revision 19632)
@@ -27,5 +27,24 @@
  */ 
 
-/*include files: {{{*/
+ /* \brief: run core ISSM solution using Dakota inputs coming from CPU 0.
+ * \sa qmu.cpp DakotaPlugin.cpp
+ *
+ * This routine needs to be understood simultaneously with qmu.cpp and DakotaPlugin. 
+ * DakotaSpawnCoreParallel is called by all CPUS, with CPU 0 holding Dakota variable values, along 
+ * with variable descriptors. 
+ *
+ * DakotaSpawnCoreParallel takes care of broadcasting the variables and their descriptors across the MPI 
+ * ring. Once this is done, we use the variables to modify the inputs for the solution core. 
+ * For ex, if "rho_ice" is provided, for ex 920, we include "rho_ice" in the inputs, then 
+ * call the core with the modified inputs. This is the way we get Dakota to explore the parameter 
+ * space of the core. 
+ *
+ * Once the core is called, we process the results of the core, and using the processed results, 
+ * we compute response functions. The responses are computed on all CPUS, but they are targeted 
+ * for CPU 0, which will get these values back to the Dakota engine. 
+ *
+ */ 
+
+/*include config: {{{*/
 #ifdef HAVE_CONFIG_H
 #include <config.h>
@@ -33,5 +52,7 @@
 #error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
 #endif
-
+/*}}}*/
+
+/*include ISSM files: */
 #include "./cores.h"
 #include "../toolkits/toolkits.h"
@@ -39,6 +60,7 @@
 #include "../classes/classes.h"
 
-#ifdef _HAVE_DAKOTA_ //only works if dakota library has been compiled in.
-#if _DAKOTA_MAJOR_ < 5 || (_DAKOTA_MAJOR_ == 5 && _DAKOTA_MINOR_ < 3)
+#if defined(_HAVE_DAKOTA_) && (_DAKOTA_MAJOR_ < 5 || (_DAKOTA_MAJOR_ == 5 && _DAKOTA_MINOR_ < 3))
+
+/*include dakota files: */
 #include <ParallelLibrary.H>
 #include <ProblemDescDB.H>
@@ -46,114 +68,161 @@
 #include <DakotaModel.H>
 #include <DakotaInterface.H>
-#else
-#include <ParallelLibrary.hpp>
-#include <ProblemDescDB.hpp>
-#include <DakotaStrategy.hpp>
-#include <DakotaModel.hpp>
-#include <DakotaInterface.hpp>
-#endif
-#include "./DakotaSpawnCore.h"
-#endif
+int  DakotaSpawnCore(double* d_responses, int d_numresponses, double* d_variables, char** d_variables_descriptors,int d_numvariables, void* void_femmodel,int counter){ /*{{{*/
+
+	/*Notice the d_, which prefixes anything that is being provided to us by the Dakota plugin. Careful. some things are ours, some are dakotas!: */
+
+	char     **responses_descriptors    = NULL;      //these are our! there are only numresponsedescriptors of them, not d_numresponses!!!
+	int        numresponsedescriptors;
+	int        solution_type;
+	bool       control_analysis         = false;
+	void     (*solutioncore)(FemModel*) = NULL;
+	FemModel  *femmodel                 = NULL;
+	bool       nodakotacore             = true;
+
+	/*If counter==-1 on cpu0, it means that the dakota runs are done. In which case, bail out and return 0: */
+	ISSM_MPI_Bcast(&counter,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 
+	if(counter==-1)return 0;
+
+	/*cast void_femmodel to FemModel, and at the same time, make a copy, so we start this new core run for this specific sample 
+	 *with a brand new copy of the model, which has not been tampered with by previous dakota runs: */
+	femmodel=(reinterpret_cast<FemModel*>(void_femmodel))->copy();
+
+	/*retrieve parameters: */
+	femmodel->parameters->FindParam(&responses_descriptors,&numresponsedescriptors,QmuResponsedescriptorsEnum);
+	femmodel->parameters->FindParam(&solution_type,SolutionTypeEnum);
+	femmodel->parameters->FindParam(&control_analysis,InversionIscontrolEnum);
+
+	if(VerboseQmu()) _printf0_("qmu iteration: " << counter << "\n");
+
+	/* only cpu 0, running dakota is providing us with variables and variables_descriptors and numresponses: broadcast onto other cpus: */
+	DakotaMPI_Bcast(&d_variables,&d_variables_descriptors,&d_numvariables,&d_numresponses);
+
+	/*Modify core inputs in objects contained in femmodel, to reflect the dakota variables inputs: */
+	InputUpdateFromDakotax(femmodel,d_variables,d_variables_descriptors,d_numvariables);
+
+	/*Determine solution sequence: */
+	if(VerboseQmu()) _printf0_("Starting " << EnumToStringx(solution_type) << " core:\n");
+	WrapperCorePointerFromSolutionEnum(&solutioncore,femmodel->parameters,solution_type,nodakotacore);
+
+	/*Run the core solution sequence: */
+	solutioncore(femmodel);
+
+	/*compute responses: */
+	if(VerboseQmu()) _printf0_("compute dakota responses:\n");
+	femmodel->DakotaResponsesx(d_responses,responses_descriptors,numresponsedescriptors,d_numresponses);
+
+	/*Free resources:*/
+	DakotaFree(&d_variables,&d_variables_descriptors,&responses_descriptors, d_numvariables, numresponsedescriptors);
+
+	/*Avoid leaks here: */
+	delete femmodel;
+
+	return 1; //this is critical! do not return 0, otherwise, dakota_core will stop running!
+} 
 /*}}}*/
-/*DakotaPlugin class {{{*/
-#ifdef _HAVE_DAKOTA_ //only works if dakota library has been compiled in.
-#if _DAKOTA_MAJOR_ < 5 || (_DAKOTA_MAJOR_ == 5 && _DAKOTA_MINOR_ < 3)
-#include <DirectApplicInterface.H>
-#include <DakotaResponse.H>
-#include <ParamResponsePair.H>
-#include <system_defs.h>
-#include <ProblemDescDB.H>
-#include <ParallelLibrary.H>
-#else
-#include <DirectApplicInterface.hpp>
-#include <DakotaResponse.hpp>
-#include <ParamResponsePair.hpp>
-#include <ProblemDescDB.hpp>
-#include <ParallelLibrary.hpp>
-#endif
-namespace SIM {
-	class DakotaPlugin: public Dakota::DirectApplicInterface{
-		public:
-			/*these fields are used by core solutions: */
-			void *femmodel;
-			int   counter;
-			/*Constructors/Destructors*/
-			DakotaPlugin(const Dakota::ProblemDescDB& problem_db,void* in_femmodel):Dakota::DirectApplicInterface(problem_db){/*{{{*/
-				femmodel = in_femmodel;
-				counter  = 0;
-			}/*}}}*/
-			~DakotaPlugin(){/*{{{*/
-				/* Virtual destructor handles referenceCount at Interface level. */ 
-			}/*}}}*/
-		protected:
-			/*execute the input filter portion of a direct evaluation invocation*/
-			//int derived_map_if(const Dakota::String& if_name);
-			/*execute an analysis code portion of a direct evaluation invocation*/
-			int derived_map_ac(const Dakota::String& driver){/*{{{*/
-
-				int i;
-				IssmDouble* variables=NULL;
-				char** variable_descriptors=NULL;
-				char*  variable_descriptor=NULL;
-				IssmDouble* responses=NULL;
-
-				/*increae counter: */
-				counter++;
-
-				/*Before launching analysis, we need to transfer the dakota inputs into Issm 
-				 *readable variables: */
-
-				/*First, the variables: */
-				variables=xNew<IssmDouble>(numACV);
-				for(i=0;i<numACV;i++){
-					variables[i]=xC[i];
-				}
-				/*The descriptors: */
-				variable_descriptors=xNew<char*>(numACV);
-				for(i=0;i<numACV;i++){
-					std::string label=xCLabels[i];
-					variable_descriptor=xNew<char>(strlen(label.c_str())+1);
-					memcpy(variable_descriptor,label.c_str(),(strlen(label.c_str())+1)*sizeof(char));
-
-					variable_descriptors[i]=variable_descriptor;
-				}
-
-				/*Initialize responses: */
-				responses=xNewZeroInit<IssmDouble>(numFns);
-
-				/*run core solution: */
-				DakotaSpawnCore(responses,numFns, variables,variable_descriptors,numACV,femmodel,counter);
-
-				/*populate responses: */
-				for(i=0;i<numFns;i++){
-					fnVals[i]=responses[i];
-				}
-
-				/*Free ressources:*/
-				xDelete<IssmDouble>(variables);
-				for(i=0;i<numACV;i++){
-					variable_descriptor=variable_descriptors[i];
-					xDelete<char>(variable_descriptor);
-				}
-				xDelete<char*>(variable_descriptors);
-				xDelete<IssmDouble>(responses);
-
-				return 0;
-			}/*}}}*/
-			/*execute the output filter portion of a direct evaluation invocation*/
-			//int derived_map_of(const Dakota::String& of_name);
-			/*add for issm: */
-			int GetCounter(){/*{{{*/
-				return counter;
-			}/*}}}*/
-		private:
-	};
-} 
-#endif
-/*}}}*/
-
-void dakota_core(FemModel* femmodel){ 
-
-	#ifdef _HAVE_DAKOTA_ //only works if dakota library has been compiled in.
+void DakotaMPI_Bcast(double** pvariables, char*** pvariables_descriptors,int* pnumvariables, int* pnumresponses){ /*{{{*/
+
+	/* * \brief: broadcast variables_descriptors, variables, numvariables and numresponses
+	 * from cpu 0 to all other cpus.
+	 */ 
+
+	int i;
+	int my_rank;
+
+	/*inputs and outputs: */
+	double* variables=NULL;
+	char**  variables_descriptors=NULL;
+	int     numvariables;
+	int     numresponses;
+
+	/*intermediary: */
+	char* string=NULL;
+	int   string_length;
+
+	/*recover my_rank:*/
+	my_rank=IssmComm::GetRank();
+
+	/*recover inputs from pointers: */
+	variables=*pvariables;
+	variables_descriptors=*pvariables_descriptors;
+	numvariables=*pnumvariables;
+	numresponses=*pnumresponses;
+
+	/*numvariables: */
+	ISSM_MPI_Bcast(&numvariables,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 
+
+	/*variables:*/
+	if(my_rank!=0)variables=xNew<double>(numvariables);
+	ISSM_MPI_Bcast(variables,numvariables,MPI_DOUBLE,0,IssmComm::GetComm()); 
+
+	/*variables_descriptors: */
+	if(my_rank!=0){
+		variables_descriptors=xNew<char*>(numvariables);
+	}
+	for(i=0;i<numvariables;i++){
+		if(my_rank==0){
+			string=variables_descriptors[i];
+			string_length=(strlen(string)+1)*sizeof(char);
+		}
+		ISSM_MPI_Bcast(&string_length,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 
+		if(my_rank!=0)string=xNew<char>(string_length);
+		ISSM_MPI_Bcast(string,string_length,ISSM_MPI_CHAR,0,IssmComm::GetComm()); 
+		if(my_rank!=0)variables_descriptors[i]=string;
+	}
+
+	/*numresponses: */
+	ISSM_MPI_Bcast(&numresponses,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 
+
+	/*Assign output pointers:*/
+	*pnumvariables=numvariables;
+	*pvariables=variables;
+	*pvariables_descriptors=variables_descriptors;
+	*pnumresponses=numresponses;
+} /*}}}*/
+void DakotaFree(double** pvariables,char*** pvariables_descriptors,char*** presponses_descriptors,int numvariables,int numresponses){ /*{{{*/
+
+	/*\brief DakotaFree: free allocations on other cpus, not done by Dakota.*/
+
+	int i;
+	int my_rank;
+
+	double  *variables             = NULL;
+	char   **variables_descriptors = NULL;
+	char   **responses_descriptors = NULL;
+	char    *string                = NULL;
+
+	/*recover pointers: */
+	variables=*pvariables;
+	variables_descriptors=*pvariables_descriptors;
+	responses_descriptors=*presponses_descriptors;
+
+	/*recover my_rank:*/
+	my_rank=IssmComm::GetRank();
+
+	/*Free variables and variables_descriptors only on cpu !=0*/
+	if(my_rank!=0){
+		xDelete<double>(variables);
+		for(i=0;i<numvariables;i++){
+			string=variables_descriptors[i];
+			xDelete<char>(string);
+		}
+		xDelete<char*>(variables_descriptors);
+	}
+
+	//responses descriptors on every cpu
+	for(i=0;i<numresponses;i++){
+		string=responses_descriptors[i];
+		xDelete<char>(string);
+	}
+	//rest of dynamic allocations.
+	xDelete<char*>(responses_descriptors);
+
+	/*Assign output pointers:*/
+	*pvariables=variables;
+	*pvariables_descriptors=variables_descriptors;
+	*presponses_descriptors=responses_descriptors;
+} /*}}}*/
+void dakota_core(FemModel* femmodel){  /*{{{*/
+
 
 	int                my_rank;
@@ -227,4 +296,8 @@
 	xDelete<char>(dakota_output_file);
 
-	#endif //#ifdef _HAVE_DAKOTA_
+} /*}}}*/
+#else
+void dakota_core(FemModel* femmodel){ 
+	/*Dakota not compiled in (guard is presumably _HAVE_DAKOTA_ per the removed #endif above): no-op stub so callers link either way*/
+}
+#endif
Index: /issm/trunk-jpl/src/c/main/issm_dakota.cpp
===================================================================
--- /issm/trunk-jpl/src/c/main/issm_dakota.cpp	(revision 19632)
+++ /issm/trunk-jpl/src/c/main/issm_dakota.cpp	(revision 19632)
@@ -0,0 +1,82 @@
+/*!\file:  issm_dakota.cpp
+ * \brief: ISSM DAKOTA main program
+ */ 
+
+#include "./issm.h"
+
+/*Dakota includes: */
+#include "ParallelLibrary.hpp"
+#include "ProblemDescDB.hpp"
+#include "LibraryEnvironment.hpp"
+#include "DakotaModel.hpp"
+#include "DakotaInterface.hpp"
+
+int main(int argc,char **argv){ /*ISSM-Dakota driver: plugs ISSM's parallel direct interface into a library-mode Dakota run*/
+
+	bool parallel=true; // NOTE(review): never read below — candidate for removal
+	char* dakota_input_file=NULL;
+
+	/*Define MPI_DEBUG in dakota_global_defs.cpp to cause a hold here (debugger attach point)*/
+	Dakota::mpi_debug_hold();
+	
+	/*Initialize MPI: */
+	ISSM_MPI_Init(&argc, &argv); // initialize MPI
+
+	/*Recover the file name for the dakota input file:*/
+	dakota_input_file=xNew<char>((strlen(argv[2])+strlen(argv[3])+strlen(".qmu.in")+1)); // NOTE(review): assumes argc>=4 and argv[2]/argv[3] hold the path and model-name pieces — no guard here; confirm the launcher contract
+	sprintf(dakota_input_file,"%s%s%s",argv[2],argv[3],".qmu.in");
+
+	Cout << "dakota_input_file: " << dakota_input_file << "\n";
+	
+	/*Parse the input and construct the Dakota LibraryEnvironment, performing input data checks*/
+	Dakota::ProgramOptions opts;
+	opts.input_file(dakota_input_file);
+
+	
+	/*Default construction builds the MPIManager, which assumes COMM_WORLD*/
+	Dakota::LibraryEnvironment env(opts);
+
+	if (env.mpi_manager().world_rank() == 0)
+		Cout << "Library mode 1: run_dakota_parse()\n";
+	
+	/*Get the list of all models matching the specified model type, interface, and driver:*/
+	Dakota::ModelList filt_models = env.filtered_model_list("single", "direct", "matlab"); // NOTE(review): driver filter is "matlab" — verify it matches the analysis_driver named in the generated .qmu.in
+	if (filt_models.empty()) {
+		Cerr << "Error: no parallel interface plugin performed.  Check compatibility "
+			<< "between parallel\n       configuration and selected analysis_driver."
+			<< std::endl;
+		Dakota::abort_handler(-1);
+	}
+	
+	Dakota::ProblemDescDB& problem_db = env.problem_description_db();
+	Dakota::ModelLIter ml_iter;
+	size_t model_index = problem_db.get_db_model_node(); // save active node for restoration after the loop
+	for (ml_iter = filt_models.begin(); ml_iter != filt_models.end(); ++ml_iter) {
+		// point the DB nodes at the input specification for this Model
+		problem_db.set_db_model_nodes(ml_iter->model_id());
+
+		Dakota::Interface& model_interface = ml_iter->derived_interface();
+
+		// Parallel case: plug in derived Interface object with an analysisComm.
+		// Note: retrieval and passing of analysisComm is necessary only if
+		// parallel operations will be performed in the derived constructor.
+
+		// retrieve the currently active analysisComm from the Model.  In the most
+		// general case, need an array of Comms to cover all Model configurations.
+		const MPI_Comm& analysis_comm = ml_iter->analysis_comm();
+
+		// don't increment ref count since no other envelope shares this letter
+		model_interface.assign_rep(new
+				SIM::IssmParallelDirectApplicInterface(problem_db, analysis_comm,NULL), false); // NOTE(review): third ctor argument is NULL here — confirm the interface tolerates a null FemModel at this point
+	}
+	problem_db.set_db_model_nodes(model_index);            // restore the saved DB node
+
+	/*Execute the environment (runs the Dakota study):*/
+	env.execute();
+
+	/*Finalize MPI:*/
+	ISSM_MPI_Finalize(); // NOTE(review): dakota_input_file is never xDelete'd — harmless at process exit, but a leak by ISSM conventions
+
+	/*Return unix success: */
+	return 0; 
+}
