Index: /issm/trunk-jpl/src/m/classes/clusters/lonestar.m
===================================================================
--- /issm/trunk-jpl/src/m/classes/clusters/lonestar.m	(revision 19669)
+++ /issm/trunk-jpl/src/m/classes/clusters/lonestar.m	(revision 19670)
@@ -21,8 +21,4 @@
 		 time          = 24*60*60;
 	 end
-	 properties (SetAccess=private) 
-		 np=20*8;
-		 % }}}
-	 end
 	 methods
 		 function cluster=lonestar(varargin) % {{{
@@ -33,4 +29,5 @@
 			 %use provided options to change fields
 			 cluster=AssignObjectFields(pairoptions(varargin{:}),cluster);
+
 		 end
 		 %}}}
@@ -44,5 +41,5 @@
 			 disp(sprintf('    numnodes: %i',cluster.numnodes));
 			 disp(sprintf('    cpuspernode: %i',cluster.cpuspernode));
-			 disp(sprintf('    np: %i',cluster.cpuspernode*cluster.numnodes));
+			 disp(sprintf('    np: %i',cluster.np));
 			 disp(sprintf('    queue: %s',cluster.queue));
 			 disp(sprintf('    codepath: %s',cluster.codepath));
@@ -50,4 +47,9 @@
 			 disp(sprintf('    interactive: %i',cluster.interactive));
 			 disp(sprintf('    time: %i',cluster.time));
+		 end
+		 %}}}
+		 function numprocs=np(cluster) % {{{
+			 %compute number of processors
+			 numprocs=cluster.numnodes*cluster.cpuspernode;
 		 end
 		 %}}}
@@ -70,7 +72,4 @@
 			 if(isvalgrind), disp('valgrind not supported by cluster, ignoring...'); end
 			 if(isgprof),    disp('gprof not supported by cluster, ignoring...'); end
-
-			 %compute number of processors
-			 cluster.np=cluster.numnodes*cluster.cpuspernode;
 
 			 %write queuing script 
@@ -99,6 +98,11 @@
 			 if(isgprof),    disp('gprof not supported by cluster, ignoring...'); end
 
-			 %compute number of processors
-			 cluster.np=cluster.numnodes*cluster.cpuspernode;
+			 executable='issm.exe';
+			 if isdakota,
+				 version=IssmConfig('_DAKOTA_VERSION_'); version=str2num(version(1:3));
+				 if (version>=6),
+					 executable='issm_dakota.exe';
+				 end
+			 end
 
 			 %write queuing script 
@@ -122,5 +126,5 @@
 			 fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n');       %FIXME
 			 fprintf(fid,'cd %s/%s\n\n',cluster.executionpath,dirname);
-			 fprintf(fid,'mpiexec -np %i %s/issm.exe %s %s %s\n',cluster.np,cluster.codepath,EnumToString(solution),[cluster.executionpath '/' dirname],modelname);
+			 fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,EnumToString(solution),[cluster.executionpath '/' dirname],modelname);
 			 if ~io_gather, %concatenate the output files:
 				 fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);
@@ -131,5 +135,5 @@
 			 if cluster.interactive,
 				 fid=fopen([modelname '.run'],'w');
-				 fprintf(fid,'mpiexec -np %i %s/issm.exe %s %s %s\n',cluster.np,cluster.codepath,EnumToString(solution),[cluster.executionpath '/' dirname],modelname);
+				 fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,EnumToString(solution),[cluster.executionpath '/' dirname],modelname);
 				 if ~io_gather, %concatenate the output files:
 					 fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);
Index: /issm/trunk-jpl/src/m/classes/clusters/pfe.m
===================================================================
--- /issm/trunk-jpl/src/m/classes/clusters/pfe.m	(revision 19669)
+++ /issm/trunk-jpl/src/m/classes/clusters/pfe.m	(revision 19670)
@@ -26,8 +26,4 @@
 		 hyperthreading = 0;
 	 end
-	 properties (SetAccess=private) 
-		 np=20*8;
-		 % }}}
-	 end
 	 methods
 		 function cluster=pfe(varargin) % {{{
@@ -60,4 +56,9 @@
 		 end
 		 %}}}
+		 function numprocs=np(cluster) % {{{
+			 %compute number of processors
+			 numprocs=cluster.numnodes*cluster.cpuspernode;
+		 end
+		 %}}}
 		 function md = checkconsistency(cluster,md,solution,analyses) % {{{
 
@@ -116,6 +117,11 @@
 			 if(isgprof),    disp('gprof not supported by cluster, ignoring...'); end
 
-			 %compute number of processors
-			 cluster.np=cluster.numnodes*cluster.cpuspernode;
+			 executable='issm.exe';
+			 if isdakota,
+				 version=IssmConfig('_DAKOTA_VERSION_'); version=str2num(version(1:3));
+				 if (version>=6),
+					 executable='issm_dakota.exe';
+				 end
+			 end
 
 			 %write queuing script 
@@ -139,7 +145,7 @@
 			 fprintf(fid,'cd %s/%s/\n\n',cluster.executionpath,dirname);
 			 if ~isvalgrind,
-				 fprintf(fid,'mpiexec -np %i %s/issm.exe %s %s %s\n',cluster.np,cluster.codepath,EnumToString(solution),[cluster.executionpath '/' dirname],modelname);
-			 else
-				 fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/issm.exe %s %s %s\n',cluster.np,cluster.codepath,EnumToString(solution),[cluster.executionpath '/' dirname],modelname);
+				 fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,EnumToString(solution),[cluster.executionpath '/' dirname],modelname);
+			 else
+				 fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,EnumToString(solution),[cluster.executionpath '/' dirname],modelname);
 			 end
 			 if ~io_gather, %concatenate the output files:
@@ -153,10 +159,10 @@
 				 if cluster.interactive==10,
 						 fprintf(fid,'module unload mpi-mvapich2/1.4.1/gcc\n');
-						 fprintf(fid,'mpiexec -np %i %s/issm.exe %s %s %s\n',cluster.np,cluster.codepath,EnumToString(solution),[pwd() '/run'],modelname);
+						 fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,EnumToString(solution),[pwd() '/run'],modelname);
 				 else
 					 if ~isvalgrind,
-						 fprintf(fid,'mpiexec -np %i %s/issm.exe %s %s %s\n',cluster.np,cluster.codepath,EnumToString(solution),[cluster.executionpath '/Interactive' num2str(cluster.interactive)],modelname);
+						 fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,EnumToString(solution),[cluster.executionpath '/Interactive' num2str(cluster.interactive)],modelname);
 					 else
-						 fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/issm.exe %s %s %s\n',cluster.np,cluster.codepath,EnumToString(solution),[cluster.executionpath '/Interactive' num2str(cluster.interactive)],modelname);
+						 fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,EnumToString(solution),[cluster.executionpath '/Interactive' num2str(cluster.interactive)],modelname);
 					 end
 				 end
@@ -174,7 +180,4 @@
 
 			 if(isgprof),    disp('gprof not supported by cluster, ignoring...'); end
-
-			 %compute number of processors
-			 cluster.np=cluster.numnodes*cluster.cpuspernode;
 
 			 %write queuing script 
Index: /issm/trunk-jpl/src/m/qmu/dakota_in_write.m
===================================================================
--- /issm/trunk-jpl/src/m/qmu/dakota_in_write.m	(revision 19669)
+++ /issm/trunk-jpl/src/m/qmu/dakota_in_write.m	(revision 19670)
@@ -85,4 +85,6 @@
 if IssmConfig('_DAKOTA_VERSION_') < 6,
 	strategy_write(fidi,params);
+else
+	environment_write(fidi,params);
 end
 
@@ -119,9 +121,23 @@
 
 fprintf(fidi,'strategy,\n');
-fprintf(fidi,'\tsingle_method\n');
+fprintf(fidi,'\tsingle_method\n\n');
 param_write(fidi,'\t  ','graphics','','\n',params);
 param_write(fidi,'\t  ','tabular_graphics_data','','\n',params);
 param_write(fidi,'\t  ','tabular_graphics_file',' ''','''\n',params);
 fprintf(fidi,'\n');
+
+end
+
+%%  function to write the environment section of the file
+
+function []=environment_write(fidi,params)
+
+	display('Writing environment section of Dakota input file.');
+
+	fprintf(fidi,'environment,\n');
+	param_write(fidi,'\t  ','graphics','','\n',params);
+	param_write(fidi,'\t  ','tabular_graphics_data','','\n',params);
+	param_write(fidi,'\t  ','tabular_graphics_file',' ''','''\n',params);
+	fprintf(fidi,'\n');
 
 end
