Index: /issm/trunk-jpl/src/m/classes/clusters/generic.py
===================================================================
--- /issm/trunk-jpl/src/m/classes/clusters/generic.py	(revision 26182)
+++ /issm/trunk-jpl/src/m/classes/clusters/generic.py	(revision 26183)
@@ -121,6 +121,6 @@
 
                 if IssmConfig('_HAVE_MPI_')[0]:
-                    fid.write('mpiexec -np {} {} --leak-check=full {} {}/{} {} {}/{} {} 2> {}.errlog > {}.outlog '.
-                              format(self.np, self.valgrind, supstring, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
+                    fid.write('mpiexec -np {} {} --leak-check=full {} {}/{} {} {}/{} {} 2>{}.errlog>{}.outlog '.format(self.np, self.valgrind, supstring, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
+                    #fid.write('mpiexec -np {} {} --tool=callgrind {}/{} {} {}/{} {} 2>{}.errlog>{}.outlog '.format(self.np, self.valgrind, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
                 else:
                     fid.write('{} --leak-check=full {} {}/{} {} {}/{} {} 2> {}.errlog > {}.outlog '.
Index: /issm/trunk-jpl/src/m/classes/clusters/hexagon.py
===================================================================
--- /issm/trunk-jpl/src/m/classes/clusters/hexagon.py	(revision 26182)
+++ 	(revision )
@@ -1,145 +1,0 @@
-import subprocess
-from fielddisplay import fielddisplay
-from pairoptions import pairoptions
-from issmssh import issmssh
-from issmscpin import issmscpin
-from issmscpout import issmscpout
-from IssmConfig import IssmConfig
-import datetime
-try:
-    from hexagon_settings import hexagon_settings
-except ImportError:
-    print('You need hexagon_settings.py to proceed, check presence and sys.path')
-
-
-class hexagon(object):
-    """
-    Hexagon cluster class definition
-    Hexagon have nodes built of 2 * 16 CPUs. Nodes are dedicated to one job so the best usage is to use 32 procs per nodes (16 per cores) as it is what is billed anyway.
-    You can reduce this number if you run out of memory as the total node memory is divided by the number of procs
-       Usage:
-          cluster = hexagon()
-    """
-
-    def __init__(self, *args):  # {{{
-        self.name = 'hexagon'
-        self.login = ''
-        self.numnodes = 2
-        self.procspernodes = 32
-        self.mem = 32000
-        self.queue = 'batch'
-        self.time = 2 * 60
-        self.codepath = ''
-        self.executionpath = ''
-        self.interactive = 0
-        self.port = []
-        self.accountname = ''
-
-    #use provided options to change fields
-        options = pairoptions(*args)
-
-    #initialize cluster using user settings if provided
-        self = hexagon_settings(self)
-
-    #OK get other fields
-        self = options.AssignObjectFields(self)
-        self.np = self.numnodes * self.procspernodes
-    # }}}
-
-    def __repr__(self):      # {{{
-        #  display the object
-        s = "class hexagon object:"
-        s = "%s\n%s" % (s, fielddisplay(self, 'name', 'name of the cluster'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'login', 'login'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'numnodes', 'number of nodes'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'procspernodes', 'number of mpi procs per nodes  default and optimal is 32'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'mem', 'Total node memory'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'queue', 'name of the queue'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'time', 'walltime requested in minutes'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'codepath', 'code path on the cluster'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'executionpath', 'execution path on the cluster'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'interactive', ''))
-        s = "%s\n%s" % (s, fielddisplay(self, 'accountname', 'your cluster account'))
-        return s
-    # }}}
-
-    def checkconsistency(self, md, solution, analyses):      # {{{
-        #mem should not be over 32000mb
-        #numprocs should not be over 4096
-        #we have cpupernodes * numberofcpus = mppwidth and mppnppn = cpupernodes,
-        #Miscelaneous
-        if not self.login:
-            md = md.checkmessage('login empty')
-        if not self.codepath:
-            md = md.checkmessage('codepath empty')
-        if not self.executionpath:
-            md = md.checkmessage('executionpath empty')
-        if self.interactive == 1:
-            md = md.checkmessage('interactive mode not implemented')
-        if self.mem > 32000:
-            md = md.checkmessage('asking too much memory max is 32000 per node')
-        return self
-    # }}}
-
-    def BuildQueueScript(self, dirname, modelname, solution, io_gather, isvalgrind, isgprof, isdakota, isoceancoupling):    # {{{
-        executable = 'issm.exe'
-        if isdakota:
-            version = IssmConfig('_DAKOTA_VERSION_')[0:2]
-            version = float(version)
-            if version >= 6:
-                executable = 'issm_dakota.exe'
-        if isoceancoupling:
-            executable = 'issm_ocean.exe'
-
-    #write queuing script
-        shortname = modelname[0:min(12, len(modelname))]
-        fid = open(modelname + '.queue', 'w')
-        fid.write('  #!/bin/bash\n')
-        fid.write('  #PBS - N %s \n' % shortname)
-        fid.write('  #PBS - l mppwidth=%i, mppnppn=%i\n' % (self.np, self.procspernodes))
-        timeobj = datetime.timedelta(minutes=self.time)
-        m, s = divmod(timeobj.total_seconds(), 60)
-        h, m = divmod(m, 60)
-        timestring = "%02d:%02d:%02d" % (h, m, s)
-        fid.write('#PBS -l walltime=%s\n' % timestring)  #walltime is hh:mm:ss
-        fid.write('#PBS -l mppmem=%imb\n' % int(self.mem / self.procspernodes))
-        fid.write('#PBS -A %s\n' % self.accountname)
-        fid.write('#PBS -o %s/%s/%s.outlog \n' % (self.executionpath, dirname, modelname))
-        fid.write('#PBS -e %s/%s/%s.errlog \n\n' % (self.executionpath, dirname, modelname))
-        fid.write('export ISSM_DIR="%s/../"\n' % self.codepath)
-        fid.write('export CRAY_ROOTFS=DSL\n')
-        fid.write('module swap PrgEnv-cray / 5.2.40 PrgEnv - gnu\n')
-        fid.write('module load cray-petsc\n')
-        fid.write('module load cray-tpsl\n')
-        fid.write('module load cray-mpich\n')
-        fid.write('module load gsl\n')
-        fid.write('cd %s/%s/\n\n' % (self.executionpath, dirname))
-        fid.write('aprun -B %s/%s %s %s/%s %s\n' % (self.codepath, executable, str(solution), self.executionpath, dirname, modelname))
-        fid.close()
-    # }}}
-
-    def UploadQueueJob(self, modelname, dirname, filelist):    # {{{
-        #compress the files into one zip.
-        compressstring = 'tar -zcf %s.tar.gz ' % dirname
-        for file in filelist:
-            compressstring += ' %s' % file
-        subprocess.call(compressstring, shell=True)
-
-        print('uploading input file and queueing script')
-        issmscpout(self.name, self.executionpath, self.login, self.port, [dirname + '.tar.gz'])
-    # }}}
-
-    def LaunchQueueJob(self, modelname, dirname, filelist, restart, batch):    # {{{
-        print('launching solution sequence on remote cluster')
-        if restart:
-            launchcommand = 'cd %s && cd %s && qsub %s.queue' % (self.executionpath, dirname, modelname)
-        else:
-            launchcommand = 'cd %s && rm -rf ./%s && mkdir %s && cd %s && mv ../%s.tar.gz ./ && tar -zxf %s.tar.gz  && qsub %s.queue' % (self.executionpath, dirname, dirname, dirname, dirname, dirname, modelname)
-        issmssh(self.name, self.login, self.port, launchcommand)
-    # }}}
-
-    def Download(self, dirname, filelist):    # {{{
-        #copy files from cluster to current directory
-        directory = '%s/%s/' % (self.executionpath, dirname)
-        issmscpin(self.name, self.login, self.port, directory, filelist)
-    # }}}
Index: /issm/trunk-jpl/src/m/classes/clusters/vilje.m
===================================================================
--- /issm/trunk-jpl/src/m/classes/clusters/vilje.m	(revision 26182)
+++ 	(revision )
@@ -1,209 +1,0 @@
-%vilje class definition
-%
-%   Usage:
-%      cluster=greenplanet();
-%      cluster=greenplanet('np',3);
-%      cluster=greenplanet('np',3,'login','username');
-
-classdef vilje
-	properties (SetAccess=public)  
-		% {{{
-		name           = 'vilje';
-		login          = '';
-		numnodes       = 2;
-		cpuspernode    = 32;
-		procspernodes  = 16;
-		mem            = 28;
-		numstreams		= 8; %Henning added
-		queue          = 'workq';
-		time           = 2*60;
-		codepath       = '';
-		executionpath  = '';
-		interactive    = 0;
-		port           = [];
-		accountname    = '';
-		% }}}
-	end
-	methods
-		function cluster=vilje(varargin) % {{{
-
-			%initialize cluster using default settings if provided
-			if (exist('vilje_settings')==2), vilje_settings; end
-
-			%use provided options to change fields
-			cluster=AssignObjectFields(pairoptions(varargin{:}),cluster);
-		end
-		%}}}
-		function disp(cluster) % {{{
-			%  display the object
-			disp(sprintf('class ''%s'' object ''%s'' = ',class(cluster),inputname(1)));
-			disp(sprintf('    name: %s',cluster.name));
-			disp(sprintf('    login: %s',cluster.login));
-			disp(sprintf('    accountname: %s',cluster.accountname));
-			disp(sprintf('    numnodes: %i',cluster.numnodes));
-			disp(sprintf('    cpuspernode: %i',cluster.cpuspernode));
-			disp(sprintf('    np: %i', cluster.cpuspernode*cluster.numnodes));
-			disp(sprintf('    procspernodes: %i',cluster.procspernodes));
-			disp(sprintf('    queue: %s',cluster.queue));
-			disp(sprintf('    codepath: %s',cluster.codepath));
-			disp(sprintf('    executionpath: %s',cluster.executionpath));
-			disp(sprintf('    interactive: %i',cluster.interactive));
-			disp(sprintf('    time: %i',cluster.time));
-			disp(sprintf('    memory: %i',cluster.mem));
-		end
-		%}}}
-		function md = checkconsistency(cluster,md,solution,analyses) % {{{
-
-			available_queues={'workq'};
-			queue_requirements_time=[5*24*60];
-			queue_requirements_np=[30];
-
-			QueueRequirements(available_queues,queue_requirements_time,queue_requirements_np,cluster.queue,cluster.np,1)
-
-			%Miscelaneous
-			if isempty(cluster.login), md = checkmessage(md,'login empty'); end
-			if isempty(cluster.accountname), md = checkmessage(md,'accountname empty'); end
-			if isempty(cluster.codepath), md = checkmessage(md,'codepath empty'); end
-			if isempty(cluster.executionpath), md = checkmessage(md,'executionpath empty'); end
-
-		end
-		%}}}
-		function numprocs=np(self) % {{{
-			%compute number of processors
-			numprocs=self.numnodes*self.procspernodes;
-		end
-		%}}}
-		function BuildKrigingQueueScript(cluster,modelname,solution,io_gather,isvalgrind,isgprof) % {{{
-
-			if(isvalgrind), disp('valgrind not supported by cluster, ignoring...'); end
-			if(isgprof),    disp('gprof not supported by cluster, ignoring...'); end
-
-			%compute number of processors
-% 			cluster.np=cluster.numnodes*cluster.cpuspernode;
-			np(cluster);%=cluster.numnodes*cluster.cpuspernode;
-
-			%write queuing script 
-			fid=fopen([modelname '.queue'],'w');
-			fprintf(fid,'#PBS -S /bin/bash\n');
-			fprintf(fid,'#PBS -N %s\n',modelname);
-			fprintf(fid,'#PBS -l select=%i:ncpus=%i:mpiprocs=%i\n',cluster.numnodes,cluster.cpuspernode,16);
-			
-			%calculate walltime in hh:mm:ss format
-			walltime=datestr(cluster.time/(60*24),'HH:MM:SS');
-			fprintf(fid,'#PBS -l walltime=%s\n',walltime); %walltime should be in hh:mm:ss
-			fprintf(fid,'#PBS -A %s\n',cluster.accountname);
-			fprintf(fid,'#PBS -o %s.outlog \n',modelname);
-			fprintf(fid,'#PBS -e %s.errlog \n\n',modelname);
-			fprintf(fid,'export ISSM_DIR="%s/../"\n',cluster.codepath); %FIXME
-			fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n');       %FIXME
-			fprintf(fid,'module load intelcomp/17.0.0\n') 
-			fprintf(fid,'module load mpt/2.14\n')
-			fprintf(fid,'module load petsc/3.7.4d\n')
-			fprintf(fid,'module load parmetis/4.0.3\n') 
-			fprintf(fid,'module load mumps/5.0.2\n')
-			fprintf(fid,'cd %s/%s\n\n',cluster.executionpath,modelname);
-			fprintf(fid,'mpiexec_mpt -n %i %s/kriging.exe %s %s\n',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname);
-			if ~io_gather, %concatenate the output files:
-				fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);
-			end
-			fclose(fid);
-		end
-		%}}}
-		function BuildQueueScript(cluster,dirname,modelname,solution,io_gather,isvalgrind,isgprof,isdakota,isoceancoupling) % {{{
-
-			if(isvalgrind), disp('valgrind not supported by cluster, ignoring...'); end
-			if(isgprof),    disp('gprof not supported by cluster, ignoring...'); end
-
-			executable='issm.exe';
-			if isdakota,
-				version=IssmConfig('_DAKOTA_VERSION_'); version=str2num(version(1:3));
-				if (version>=6),
-					executable='issm_dakota.exe';
-				end
-			end
-			if isoceancoupling,
-				executable='issm_ocean.exe';
-			end
-
-			%compute number of processors
-% 			cluster.np=cluster.numnodes*cluster.cpuspernode;
-			np(cluster);%=cluster.numnodes*cluster.cpuspernode;                     
-% 			shortname = substring(modelname,1,min(12,length(modelname)));
-
-			%write queuing script 
-			fid=fopen([modelname '.queue'],'w');
-			fprintf(fid,'#PBS -S /bin/bash\n');
-% 			fprintf(fid,'#PBS -N %s\n',shortname);
-			fprintf(fid,'#PBS -N %s\n',modelname);
-			fprintf(fid,'#PBS -q %s \n',cluster.queue);
-			fprintf(fid,'#PBS -l select=%i:ncpus=%i:mpiprocs=%i\n',cluster.numnodes,cluster.cpuspernode,cluster.procspernodes);
-
-			%calculate walltime in hh:mm:ss format
-			walltime=datestr(cluster.time/(60*24),'HH:MM:SS');
-% 			fprintf(fid,'#PBS -l walltime=%s\n',duration(0,cluster.time,0)); %walltime is in minutes.
-% 			fprintf(fid,'#PBS -l walltime=%s\n',10); %walltime is in minutes.
-			fprintf(fid,'#PBS -l walltime=%s\n',walltime); %walltime should be in hh:mm:ss
-% 			fprintf(fid,'#PBS -l walltime=%i\n',walltime); %walltime is in hh:mm:ss
-			fprintf(fid,'#PBS -A %s\n',cluster.accountname);
-			fprintf(fid,'#PBS -o %s.outlog \n',[cluster.executionpath '/' dirname '/' modelname]);
-			fprintf(fid,'#PBS -e %s.errlog \n\n',[cluster.executionpath '/' dirname '/' modelname]);
-			fprintf(fid,'export ISSM_DIR="%s/../"\n',cluster.codepath); %FIXME
-			fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n');       %FIXME
-			fprintf(fid,'module load intelcomp/17.0.0\n') 
-			fprintf(fid,'module load mpt/2.14\n')
-			fprintf(fid,'module load petsc/3.7.4d\n')
-			fprintf(fid,'module load parmetis/4.0.3\n') 
-			fprintf(fid,'module load mumps/5.0.2\n')
-			fprintf(fid,'cd %s/%s\n\n',cluster.executionpath,dirname);
-			fprintf(fid,'mpiexec_mpt -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname);
-
-			if ~io_gather, %concatenate the output files:
-				fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);
-			end
-			fclose(fid);
-
-			%in interactive mode, create a run file, and errlog and outlog file
-			if cluster.interactive,
-				fid=fopen([modelname '.run'],'w');
-				fprintf(fid,'mpiexec_mpt -np %i %s/issm.exe %s %s %s\n',cluster.np,cluster.codepath,solution,[cluster.executionpath '/' dirname],modelname);
-				if ~io_gather, %concatenate the output files:
-					fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);
-				end
-				fclose(fid);
-				fid=fopen([modelname '.errlog'],'w');
-				fclose(fid);
-				fid=fopen([modelname '.outlog'],'w');
-				fclose(fid);
-			end
-		end %}}}
-		function UploadQueueJob(cluster,modelname,dirname,filelist)% {{{
-
-			%compress the files into one zip.
-			compressstring=['tar -zcf ' dirname '.tar.gz '];
-			for i=1:numel(filelist),
-				compressstring = [compressstring ' ' filelist{i}];
-			end
-			system(compressstring);
-			disp('uploading input file and queueing script');
-			directory=cluster.executionpath;
-% 			issmbbftpout(cluster.name,directory,cluster.login,cluster.port,cluster.numstreams,{[dirname '.tar.gz']});
-			issmscpout(cluster.name,directory,cluster.login,cluster.port,{[dirname '.tar.gz']});
-
-		end
-		%}}}
-		function LaunchQueueJob(cluster,modelname,dirname,filelist,restart,batch)% {{{
-
-			disp('launching solution sequence on remote cluster');
-			launchcommand=['cd ' cluster.executionpath ' && rm -rf ./' dirname ' && mkdir ' dirname ...
-				' && cd ' dirname ' && mv ../' dirname '.tar.gz ./ && tar -zxf ' dirname '.tar.gz  && hostname && qsub ' modelname '.queue '];
-			issmssh(cluster.name,cluster.login,cluster.port,launchcommand);
-		end %}}}
-		function Download(cluster,dirname,filelist)% {{{
-
-			%copy files from cluster to current directory
-			directory=[cluster.executionpath '/' dirname '/'];
-			issmscpin(cluster.name,cluster.login,cluster.port,directory,filelist);
-
-		end %}}}
-	end
-end
Index: /issm/trunk-jpl/src/m/classes/clusters/vilje.py
===================================================================
--- /issm/trunk-jpl/src/m/classes/clusters/vilje.py	(revision 26182)
+++ 	(revision )
@@ -1,145 +1,0 @@
-import subprocess
-from fielddisplay import fielddisplay
-from pairoptions import pairoptions
-from issmssh import issmssh
-from issmscpin import issmscpin
-from issmscpout import issmscpout
-from QueueRequirements import QueueRequirements
-from IssmConfig import IssmConfig
-import datetime
-try:
-    from vilje_settings import vilje_settings
-except ImportError:
-    print('You need vilje_settings.py to proceed, check presence and sys.path')
-
-
-class vilje(object):
-    """
-    Vilje cluster class definition
-
-       Usage:
-          cluster = vilje()
-    """
-
-    def __init__(self, *args):    # {{{
-        self.name = 'vilje'
-        self.login = ''
-        self.numnodes = 2
-        self.cpuspernode = 32
-        self.procspernodes = 16
-        self.mem = 28
-        self.queue = 'workq'
-        self.time = 2 * 60
-        self.codepath = ''
-        self.executionpath = ''
-        self.interactive = 0
-        self.port = []
-        self.accountname = ''
-
-    #use provided options to change fields
-        options = pairoptions(*args)
-
-    #initialize cluster using user settings if provided
-        self = vilje_settings(self)
-    #OK get other fields
-        self = options.AssignObjectFields(self)
-        self.np = self.numnodes * self.procspernodes
-    # }}}
-
-    def __repr__(self):    # {{{
-        #  display the object
-        s = "class vilje object:"
-        s = "%s\n%s" % (s, fielddisplay(self, 'name', 'name of the cluster'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'login', 'login'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'numnodes', 'number of nodes'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'cpuspernode', 'number of nodes per CPUs (32)'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'procspernodes', 'number of mpi procs per nodes'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'mem', 'node memory'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'queue', 'name of the queue (test is an option, workq the default)'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'time', 'walltime requested in minutes'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'codepath', 'code path on the cluster'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'executionpath', 'execution path on the cluster'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'interactive', ''))
-        s = "%s\n%s" % (s, fielddisplay(self, 'accountname', 'your cluster account'))
-        return s
-    # }}}
-
-    def checkconsistency(self, md, solution, analyses):    # {{{
-        #Queue dictionarry  gives queu name as key and max walltime and cpus as var
-        queuedict = {'workq': [5 * 24 * 60, 30],
-                     'test': [30, 4]}
-        QueueRequirements(queuedict, self.queue, self.np, self.time)
-
-    #Miscelaneous
-        if not self.login:
-            md = md.checkmessage('login empty')
-        if not self.codepath:
-            md = md.checkmessage('codepath empty')
-        if not self.executionpath:
-            md = md.checkmessage('executionpath empty')
-        if self.interactive == 1:
-            md = md.checkmessage('interactive mode not implemented')
-        return self
-    # }}}
-
-    def BuildQueueScript(self, dirname, modelname, solution, io_gather, isvalgrind, isgprof, isdakota, isoceancoupling):    # {{{
-        executable = 'issm.exe'
-        if isdakota:
-            version = IssmConfig('_DAKOTA_VERSION_')[0:2]
-            version = float(version)
-            if version >= 6:
-                executable = 'issm_dakota.exe'
-        if isoceancoupling:
-            executable = 'issm_ocean.exe'
-
-    #write queuing script
-        shortname = modelname[0:min(12, len(modelname))]
-        fid = open(modelname + '.queue', 'w')
-        fid.write('#PBS -S / bin / bash\n')
-        fid.write('#PBS -N %s \n' % shortname)
-        fid.write('#PBS -q %s \n' % self.queue)
-        fid.write('#PBS -l select=%i:ncpus=%i:mpiprocs=%s\n' % (self.numnodes, self.cpuspernode, self.procspernodes))
-        timeobj = datetime.timedelta(minutes=self.time)
-        m, s = divmod(timeobj.total_seconds(), 60)
-        h, m = divmod(m, 60)
-        timestring = "%02d:%02d:%02d" % (h, m, s)
-        fid.write('#PBS -l walltime=%s\n' % timestring)  #walltime is hh:mm:ss
-        fid.write('#PBS -A %s\n' % self.accountname)
-        fid.write('#PBS -o %s/%s/%s.outlog \n' % (self.executionpath, dirname, modelname))
-        fid.write('#PBS -e %s/%s/%s.errlog \n\n' % (self.executionpath, dirname, modelname))
-        fid.write('export ISSM_DIR="%s/../ "\n' % self.codepath)
-        fid.write('module load intelcomp/17.0.0\n')
-        fid.write('module load mpt/2.14\n')
-        fid.write('module load petsc/3.7.4d\n')
-        fid.write('module load parmetis/4.0.3\n')
-        fid.write('module load mumps/5.0.2\n')
-        fid.write('cd %s/%s/\n\n' % (self.executionpath, dirname))
-        fid.write('mpiexec_mpt -np %i %s/%s %s %s/%s %s\n' % (self.np, self.codepath, executable, str(solution), self.executionpath, dirname, modelname))
-        fid.close()
-    # }}}
-
-    def UploadQueueJob(self, modelname, dirname, filelist):    # {{{
-        #compress the files into one zip.
-        compressstring = 'tar -zcf %s.tar.gz ' % dirname
-        for file in filelist:
-            compressstring += ' %s' % file
-        subprocess.call(compressstring, shell=True)
-
-        print('uploading input file and queueing script')
-        issmscpout(self.name, self.executionpath, self.login, self.port, [dirname + '.tar.gz'])
-    # }}}
-
-    def LaunchQueueJob(self, modelname, dirname, filelist, restart, batch):    # {{{
-        print('launching solution sequence on remote cluster')
-        if restart:
-            launchcommand = 'cd %s && cd %s && qsub %s.queue' % (self.executionpath, dirname, modelname)
-        else:
-            launchcommand = 'cd %s && rm -rf ./%s && mkdir %s && cd %s && mv ../%s.tar.gz ./ && tar -zxf %s.tar.gz  && qsub %s.queue' % (self.executionpath, dirname, dirname, dirname, dirname, dirname, modelname)
-        issmssh(self.name, self.login, self.port, launchcommand)
-    # }}}
-
-    def Download(self, dirname, filelist):    # {{{
-        #copy files from cluster to current directory
-        directory = '%s/%s/' % (self.executionpath, dirname)
-        issmscpin(self.name, self.login, self.port, directory, filelist)
-    # }}}
Index: /issm/trunk-jpl/src/m/classes/model.py
===================================================================
--- /issm/trunk-jpl/src/m/classes/model.py	(revision 26182)
+++ /issm/trunk-jpl/src/m/classes/model.py	(revision 26183)
@@ -38,8 +38,5 @@
 from generic import generic
 from pfe import pfe
-from vilje import vilje
-from hexagon import hexagon
 from cyclone import cyclone
-from stallo import stallo
 from saga import saga
 from balancethickness import balancethickness
@@ -185,5 +182,5 @@
     def __repr__(obj):  #{{{
         # TODO:
-        # - Convert all formatting to calls to <string>.format (see any 
+        # - Convert all formatting to calls to <string>.format (see any
         #   already converted <class>.__repr__ method for examples)
         #
@@ -893,5 +890,4 @@
                     md.hydrology.__dict__[field] = project2d(md, md.hydrology.__dict__[field], 1)
 
-        # Materials
         md.materials.rheology_B = DepthAverage(md, md.materials.rheology_B)
         md.materials.rheology_n = project2d(md, md.materials.rheology_n, 1)
Index: /issm/trunk-jpl/src/m/coordsystems/gmtmask.m
===================================================================
--- /issm/trunk-jpl/src/m/coordsystems/gmtmask.m	(revision 26182)
+++ /issm/trunk-jpl/src/m/coordsystems/gmtmask.m	(revision 26183)
@@ -6,16 +6,16 @@
 %
 
-	%are we doing a recursive call? 
+	%are we doing a recursive call?
 	if nargin==3,
 		recursive=1;
-	else 
+	else
 		recursive=0;
 	end
-	
+
 	if(recursive)disp(sprintf('             recursing: num vertices %i',length(lat)));
 	else disp(sprintf('gmtmask: num vertices %i',length(lat)));
 	end
-	
-	%Check lat and long size is not more than 50,000; If so, recursively call gmtmask: 
+
+	%Check lat and long size is not more than 50,000; If so, recursively call gmtmask:
 	if length(lat)>50000,
 		for i=1:50000:length(lat),
@@ -28,10 +28,32 @@
 		return
 	end
-	
+
 	%First, write our lat,long file for gmt:
 	nv=length(lat);
-	filename_all = ['all_vertices-' num2str(feature('GetPid')) '.txt']; 
-	filename_oce = ['oce_vertices-' num2str(feature('GetPid')) '.txt']; 
+	filename_all = ['all_vertices-' num2str(feature('GetPid')) '.txt'];
+	filename_oce = ['oce_vertices-' num2str(feature('GetPid')) '.txt'];
 	dlmwrite(filename_all,[long lat (1:nv)'],'delimiter','\t','precision',10);
+
+	%Avoid bypassing of the ld library path by Matlab (:()
+	%
+	% TODO: Do we really need this (we can/already set it in etc/environment.sh)?
+	if ismac,
+		dyld_library_path_old=getenv('DYLD_LIBRARY_PATH');
+		setenv('DYLD_LIBRARY_PATH',[ issmdir '/externalpackages/curl/install/lib:' issmdir '/externalpackages/hdf5/install/lib:' issmdir '/externalpackages/netcdf/install/lib' ]);
+	end
+
+	%Find path to gmt, list all possible known paths to gmt (you may need to add yours to the list)
+	gmtpaths = {[issmdir '/bin/gmt'],[issmdir '/externalpackages/gmt/install/bin/gmt'],'/Applications/GMT-5.4.3.app/Contents/Resources/bin/gmt', '/usr/bin/gmt'};
+	gmtpath = '';
+	for i=gmtpaths
+		if exist(i{1},'file'),
+			gmtpath = i{1};
+			break;
+		end
+	end
+	if isempty(gmtpath),
+		error('gmt not found! Make sure it is properly installed, or add its path to this file.');
+	end
+
 
 	%figure out which vertices are on the ocean, which one on the continent:
@@ -41,7 +63,11 @@
 	end
 
+	%reset DYLD_LIBRARY_PATH to what it was:
+	if ismac,
+		setenv('DYLD_LIBRARY_PATH',dyld_library_path_old);
+	end
 	%read the con_vertices.txt file and flag our mesh vertices on the continent
 	fid=fopen(['./' filename_oce],'r');
-	line=fgets(fid); 
+	line=fgets(fid);
 	line=fgets(fid);
 	oce_vertices=[];
@@ -55,5 +81,7 @@
 	mask=zeros(nv,1);
 	mask(oce_vertices)=1;
-	
+
 	system(['rm -rf ./' filename_all ' ./' filename_oce ' ./gmt.history']);
-	if ~recursive, disp(sprintf('gmtmask: done')); end;
+	if ~recursive,
+		disp(sprintf('gmtmask: done'));
+	end
Index: /issm/trunk-jpl/src/m/coordsystems/gmtmask.py
===================================================================
--- /issm/trunk-jpl/src/m/coordsystems/gmtmask.py	(revision 26182)
+++ /issm/trunk-jpl/src/m/coordsystems/gmtmask.py	(revision 26183)
@@ -44,4 +44,34 @@
     np.savetxt('./all_vertices.txt', np.transpose([long, lat, np.arange(1, nv + 1)]), delimiter='\t', fmt='%.10f')
 
+    #Avoid bypassing of the ld library path by Matlab (:()
+    issm_dir = issmdir()
+
+    try:
+        ismac
+    except NameError:
+        ismac = False
+
+    # TODO: Do we really need this (we can/already set it in etc/environment.sh)?
+    if ismac:
+        dyld_library_path_old = os.getenv('DYLD_LIBRARY_PATH')
+        os.putenv('DYLD_LIBRARY_PATH', issm_dir + '/externalpackages/curl/install/lib:' + issm_dir + '/externalpackages/hdf5/install/lib:' + issm_dir + '/externalpackages/netcdf/install/lib')
+
+    #Find path to gmt (you may need to add yours to the list).
+    #
+    # NOTE: Assumes gmtselect is also in this directory.
+    #
+    gmtpaths = ['/usr/bin/gmt',
+                issm_dir + '/bin/gmt',
+                issm_dir + '/externalpackages/gmt/install/bin/gmt',
+                '/Applications/GMT-5.4.3.app/Contents/Resources/bin/gmt']
+    gmtpath = ''
+    for i in range(len(gmtpaths)):
+        if os.path.isfile(gmtpaths[i]):
+            gmtpath = gmtpaths[i]
+            break
+
+    if gmtpath == '':
+        raise Exception('gmt not found! Make sure it is properly installed, or add its path to this file.')
+
     #figure out which vertices are on the ocean, which one on the continent:
     subprocess.call('gmtselect ./ all_vertices.txt -h0 -Df -R0/360/-90/90 -A0 -JQ180/200 -Nk/s/s/k/s > ./oce_vertices.txt', shell=True)
Index: /issm/trunk-jpl/src/m/dev/devpath.m
===================================================================
--- /issm/trunk-jpl/src/m/dev/devpath.m	(revision 26182)
+++ /issm/trunk-jpl/src/m/dev/devpath.m	(revision 26183)
@@ -1,4 +1,4 @@
 % clear the last warning to focus on the warnings of the ISSM path
-lastwarn(''); 
+lastwarn('');
 
 %Recover ISSM_DIR , or if on a Windows machine, ISSM_DIR_WIN
@@ -7,4 +7,5 @@
 else
 	ISSM_DIR=getenv('ISSM_DIR');
+	ISSM_DEV_DIR=getenv('ISSM_DEV_DIR');
 end
 
@@ -13,12 +14,12 @@
 end
 
-%Now add all issm code paths necessary to run issm smoothly. 
-%We capture the error output, so that we can warn the user to update 
-%the variable ISSM_DIR in this file, in case it is not correctly setup. 
+%Now add all issm code paths necessary to run issm smoothly.
+%We capture the error output, so that we can warn the user to update
+%the variable ISSM_DIR in this file, in case it is not correctly setup.
 
 %ISSM path
-addpath([ISSM_DIR '/src/m/os/']); %load recursivepath
-addpath([ISSM_DIR '/lib']);       %load MEX files
-addpath(recursivepath([ISSM_DIR '/src/m']));
+addpath([ISSM_DEV_DIR '/src/m/os/']); %load recursivepath
+addpath([ISSM_DEV_DIR '/lib']);       %load mex
+addpath(recursivepath([ISSM_DEV_DIR '/src/m']));
 addpath(recursivepath([ISSM_DIR '/externalpackages/scotch']));
 addpath(recursivepath([ISSM_DIR '/externalpackages/canos']));
@@ -32,5 +33,5 @@
 clear ISSM_DIR;
 
-%Check on any warning messages that might indicate that the paths were not correct. 
+%Check on any warning messages that might indicate that the paths were not correct.
 if ~isempty(lastwarn),
 	fprintf('\n  Error trying to setup ''ISSM'' code paths. Try and update the ISSM_DIR variable in your .cshrc or .bashrc!\n');
Index: /issm/trunk-jpl/src/m/dev/devpath.py
===================================================================
--- /issm/trunk-jpl/src/m/dev/devpath.py	(revision 26182)
+++ /issm/trunk-jpl/src/m/dev/devpath.py	(revision 26183)
@@ -6,4 +6,5 @@
 #Recover ISSM_DIR and USERNAME
 ISSM_DIR = os.getenv('ISSM_DIR')
+ISSM_DEV_DIR = os.getenv('ISSM_DEV_DIR')
 USERNAME = os.getenv('USER')
 JPL_SVN = os.getenv('JPL_SVN')
@@ -12,5 +13,5 @@
 
 #Go through src/m and append any directory that contains a *.py file to PATH
-for root, dirs, files in os.walk(ISSM_DIR + '/src/m'):
+for root, dirs, files in os.walk(ISSM_DEV_DIR + '/src/m'):
     if '.svn' in dirs:
         dirs.remove('.svn')
@@ -22,10 +23,10 @@
 
 #Also add the Nightly run directory
-if ISSM_DIR + '/test/NightlyRun' not in sys.path:
-    sys.path.append(ISSM_DIR + '/test/NightlyRun')
-if ISSM_DIR + '/lib' not in sys.path:
-    sys.path.append(ISSM_DIR + '/lib')
-if ISSM_DIR + '/src/wrappers/python/.libs' not in sys.path:
-    sys.path.append(ISSM_DIR + '/src/wrappers/python/.libs')
+if ISSM_DEV_DIR + '/test/NightlyRun' not in sys.path:
+    sys.path.append(ISSM_DEV_DIR + '/test/NightlyRun')
+if ISSM_DEV_DIR + '/lib' not in sys.path:
+    sys.path.append(ISSM_DEV_DIR + '/lib')
+if ISSM_DEV_DIR + '/src/wrappers/python/.libs' not in sys.path:
+    sys.path.append(ISSM_DEV_DIR + '/src/wrappers/python/.libs')
 
 # If using clusters, we need to have the path to the cluster settings directory
@@ -48,3 +49,3 @@
 
 print("\n  ISSM development path correctly loaded")
-print("Current path is {}\n\n".format(ISSM_DIR))
+print("Current path is {}\n\n".format(ISSM_DEV_DIR))
Index: /issm/trunk-jpl/src/m/mesh/planet/gmsh/gmshplanet.m
===================================================================
--- /issm/trunk-jpl/src/m/mesh/planet/gmsh/gmshplanet.m	(revision 26182)
+++ /issm/trunk-jpl/src/m/mesh/planet/gmsh/gmshplanet.m	(revision 26183)
@@ -14,4 +14,21 @@
 %      md.mesh=gmshplanet('radius',6000,'resolution',100);
 %      md.mesh=gmshplanet('radius',6000,'resolution',100);
+
+	%Find path to gmsh
+	paths = {[issmdir() 'bin/gmsh'],...
+		 [issmdir() 'externalpackages/gmsh/install/gmsh'],...
+		 ['/usr/bin/gmsh']...
+		};
+	disp(paths{1})
+	gmshpath = '';
+	for i=paths
+		if exist(i{1},'file'),
+			gmshpath = i{1};
+			break;
+		end
+	end
+	if isempty(gmshpath),
+		error('gmsh not found, make sure it is properly installed');
+	end
 
 	% Get Gmsh version
@@ -121,6 +138,6 @@
 	% Call gmsh
 	%
-	% NOTE: The default format in Gmsh 3 is "msh2". Rather than conditionally 
-	%		modifying our parsing scheme for Gmsh 4, for now, we simply set the 
+	% NOTE: The default format in Gmsh 3 is "msh2". Rather than conditionally
+	%		modifying our parsing scheme for Gmsh 4, for now, we simply set the
 	%		"-format" option.
 	%
Index: /issm/trunk-jpl/src/m/mesh/planet/gmsh/gmshplanet.py
===================================================================
--- /issm/trunk-jpl/src/m/mesh/planet/gmsh/gmshplanet.py	(revision 26182)
+++ /issm/trunk-jpl/src/m/mesh/planet/gmsh/gmshplanet.py	(revision 26183)
@@ -29,5 +29,5 @@
     subproc = subprocess.Popen(subproc_args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     outs, errs = subproc.communicate()
-    if errs != '':
+    if errs.decode('UTF-8') != '':
         raise Exception("gmshplanet: call to gmsh failed: {}".format(errs))
     gmshmajorversion = int(outs)
@@ -124,6 +124,6 @@
     # Call gmsh
     #
-    # NOTE: The default format in Gmsh 3 is "msh2". Rather than conditionally 
-    #       modifying our parsing scheme for Gmsh 4, for now, we simply set the 
+    # NOTE: The default format in Gmsh 3 is "msh2". Rather than conditionally
+    #       modifying our parsing scheme for Gmsh 4, for now, we simply set the
     #       "-format" option.
     #
Index: /issm/trunk-jpl/src/m/miscellaneous/normfit_issm.py
===================================================================
--- /issm/trunk-jpl/src/m/miscellaneous/normfit_issm.py	(revision 26182)
+++ /issm/trunk-jpl/src/m/miscellaneous/normfit_issm.py	(revision 26183)
@@ -10,10 +10,12 @@
     #  check for any NaN in any columns
     if not np.isnan(x).any():
-
         #  explicitly calculate the moments
         muhat = np.mean(x, 0)
-    # numpy defaults to 0 delta degrees of freedom; matlab uses 1
-        sigmahat = np.std(x, 0, ddof=1)
-
+        # numpy defaults to 0 delta degrees of freedom; matlab uses 1
+        # just check on length to avoid dividing by 0
+        if len(x) <= 1:
+            sigmahat = np.std(x, 0, ddof=0)
+        else:
+            sigmahat = np.std(x, 0, ddof=1)
     # no way to ask this in python, assume 4 outputs
     #if (nargout > 2):
@@ -39,4 +41,5 @@
             sigmaci[0, :] = sigmahat
             sigmaci[1, :] = sigmahat
+
     else:
         #  must loop over columns, since number of elements could be different
@@ -48,5 +51,7 @@
     #  remove any NaN and recursively call column
         for j in range(np.shape(x, 1)):
-            [muhat[j], sigmahat[j], muci[:, j], sigmaci[:, j]] = normfit_issm(x[not np.isnan(x[:, j]), j], alpha)
+            #[muhat[j], sigmahat[j], muci[:, j], sigmaci[:, j]] = normfit_issm(x[not np.isnan(x[:, j]), j], alpha)
+            muhat[j], sigmahat[j], muci[:, j], sigmaci[:, j] = normfit_issm(x[not np.isnan(x[:, j]), j], alpha)
 
-    return [muhat, sigmahat, muci, sigmaci]
+    return muhat, sigmahat, np.squeeze(muci), np.squeeze(sigmaci)
+    #return [muhat, sigmahat, muci, sigmaci]
Index: /issm/trunk-jpl/src/m/partition/partitioner.py
===================================================================
--- /issm/trunk-jpl/src/m/partition/partitioner.py	(revision 26182)
+++ /issm/trunk-jpl/src/m/partition/partitioner.py	(revision 26183)
@@ -46,5 +46,5 @@
         #partitioning essentially happens in 2D. So partition in 2D, then
         #extrude the partition vector vertically.
-        md3d = copy.deepcopy(md) # save for later
+        md3d = copy.deepcopy(md)  # save for later
         md.mesh.elements = md.mesh.elements2d
         md.mesh.x = md.mesh.x2d
@@ -58,4 +58,5 @@
     #adjacency matrix if needed:
     if recomputeadjacency == 'on':
+        print("adj calll")
         md = adjacency(md)
     else:
@@ -67,5 +68,5 @@
         else:
             # default method (from chaco.m)
-            method = np.array([1, 1, 0, 0, 1, 1, 50, 0, 0.001, 7654321]).conj().transpose()
+            method = np.array([1, 1, 0, 0, 1, 1, 50, 0, 0.001, 7654321])  #.conj().transpose()
             method[0] = 3  #  global method (3 = inertial (geometric))
             method[2] = 0  #  vertex weights (0 = off, 1 = on)
@@ -84,8 +85,13 @@
 
             #  partition into nparts
+            print("ChacoCall")
             if isinstance(md.mesh, mesh2d):
-                part = Chaco(md.qmu.adjacency, weights, np.array([]), md.mesh.x, md.mesh.y, np.zeros((md.mesh.numberofvertices, 1)), method, npart, np.array([]))[0].conj().transpose() + 1 #index partitions from 1 up. like metis.
+                print("caseA")
+                part = Chaco(md.qmu.adjacency, weights, np.array([]), md.mesh.x, md.mesh.y, np.zeros((md.mesh.numberofvertices)), method, npart, np.array([]))[0].conj().transpose() + 1   #index partitions from 1 up. like metis.
             else:
-                part = Chaco(md.qmu.adjacency, weights, np.array([]), md.mesh.x, md.mesh.y, md.mesh.z, method, npart, np.array([]))[0].conj().transpose() + 1 #index partitions from 1 up. like metis.
+                print("caseB")
+                part = Chaco(md.qmu.adjacency, weights, np.array([]), md.mesh.x, md.mesh.y, md.mesh.z, method, npart, np.array([]))[0].conj().transpose() + 1  #index partitions from 1 up. like metis.
+            print("Done")
+
     elif package == 'scotch':
         if vectortype == 'element':
@@ -93,5 +99,5 @@
         else:
             #are we using weights?
-            if m.strcmpi(options.getfieldvalue('weighting'), 'on'):
+            if options.getfieldvalue('weighting') == 'on':
                 weights = np.floor(md.qmu.vertex_weight / min(md.qmu.vertex_weight))
             else:
@@ -99,5 +105,5 @@
             maptab = Scotch(md.qmu.adjacency, [], weights, [], 'cmplt', [npart])
 
-            part = maptab[:, 1] + 1 #index partitions from 1 up. like metis.
+            part = maptab[:, 1] + 1  #index partitions from 1 up. like metis.
 
     elif package == 'linear':
@@ -116,5 +122,5 @@
     else:
         raise RuntimeError('partitioner error message: could not find {} partitioner'.format(package))
-
+    print("extrude")
     #extrude if we are in 3D:
     if md.mesh.dimension() == 3:
Index: /issm/trunk-jpl/src/m/qmu/dakota_out_parse.py
===================================================================
--- /issm/trunk-jpl/src/m/qmu/dakota_out_parse.py	(revision 26182)
+++ /issm/trunk-jpl/src/m/qmu/dakota_out_parse.py	(revision 26183)
@@ -8,7 +8,7 @@
 from helpers import *
 
-# NOTE: May be rewritten later to take advantage of Python's file I/O 
-# mechanics. As it is written now, it is often difficult to work with, but is 
-# analagous to the MATLAB version of dakota_out_parse.
+#  NOTE: May be rewritten later to take advantage of Python's file I/O
+#  mechanics. As it is written now, it is often difficult to work with, but is
+#  analogous to the MATLAB version of dakota_out_parse.
 
 
@@ -32,31 +32,31 @@
         prcm          (double array, partial rank correlation matrix)
 
-    The filei will be prompted for if empty. The fields of dresp are particular 
-    to the data contained within the file. The scm, pcm, srcm, and prcm are 
+    The filei will be prompted for if empty. The fields of dresp are particular
+    to the data contained within the file. The scm, pcm, srcm, and prcm are
     output by Dakota only for the sampling methods.
 
-    This function reads a Dakota .out output file and parses it into the Python 
-    runtime. It operates in a content-driven fashion, where it skips the 
-    intermediate data and then parses whatever output data it encounters in the 
-    order in which it exists in the file, rather than searching for data based 
-    on the particular method (this makes it independent of method). It also can 
+    This function reads a Dakota .out output file and parses it into the Python
+    runtime. It operates in a content-driven fashion, where it skips the
+    intermediate data and then parses whatever output data it encounters in the
+    order in which it exists in the file, rather than searching for data based
+    on the particular method (this makes it independent of method). It also can
     read and parse the .dat tabular_output file.
 
-    This data would typically be used for plotting and other postprocessing 
+    This data would typically be used for plotting and other postprocessing
     within MATLAB or Excel.
 
     TODO:
-    - Figure out why output from Dakota is different under MATLAB and Python 
+    - Figure out why output from Dakota is different under MATLAB and Python
     (is it the input file that we write?)
 
-    "Copyright 2009, by the California Institute of Technology. ALL RIGHTS 
-    RESERVED. United States Government Sponsorship acknowledged. Any commercial 
-    use must be negotiated with the Office of Technology Transfer at the 
+    "Copyright 2009, by the California Institute of Technology. ALL RIGHTS
+    RESERVED. United States Government Sponsorship acknowledged. Any commercial
+    use must be negotiated with the Office of Technology Transfer at the
     California Institute of Technology. (NTR 47078)
 
-    This software may be subject to U.S. export control laws. By accepting this 
-    software, the user agrees to comply with all applicable U.S. export laws 
-    and regulations. User has the responsibility to obtain export licenses, or 
-    other export authority as may be required before exporting such information 
+    This software may be subject to U.S. export control laws. By accepting this
+    software, the user agrees to comply with all applicable U.S. export laws
+    and regulations. User has the responsibility to obtain export licenses, or
+    other export authority as may be required before exporting such information
     to foreign countries or providing access to foreign persons."
     """
@@ -78,5 +78,5 @@
             raise RuntimeError('File ' + filei + ' is empty')
 
-        dresp = [] # of struct()
+        dresp = []  # of struct()
         scm = struct()
         pcm = struct()
@@ -189,5 +189,5 @@
     [ntokens, tokens] = fltokens(fline)
 
-    if strncmpi(fline, '%eval_id interface', 18): # Dakota versions >= 6
+    if fline.startswith('%eval_id interface'):  # Dakota versions >= 6
         offset = 2
     else:  # Dakota versions < 6
@@ -223,5 +223,5 @@
     print('Number of rows (Dakota func evals) = ' + str(nrow))
 
-    # Calculate statistics
+    # Calculate statistic
     if (np.size(data, 0) > 1):
         #dmean = mean(data)
@@ -229,10 +229,10 @@
         [dmean, dstddev, dmeanci, dstddevci] = normfit_issm(data, 0.05)
     else:
-        dmean = np.zeros((1, np.size(data, 1)))
-        dstddev = np.zeros((1, np.size(data, 1)))
+        dmean = np.zeros((np.size(data, 1)))
+        dstddev = np.zeros((np.size(data, 1)))
         dmeanci = np.zeros((2, np.size(data, 1)))
         dstddevci = np.zeros((2, np.size(data, 1)))
         for i in range(np.size(data, 1)):
-            [dmean[0, i], dstddev[0, i], dmeanci[:, i], dstddevci[:, i]] = normfit_issm(data[:, i], 0.05)
+            dmean[i], dstddev[i], dmeanci[:, i], dstddevci[:, i] = normfit_issm(data[:, i], 0.05)
 
     dmin = data.min(axis=0)
@@ -244,6 +244,6 @@
     dmax95 = prctile_issm(data, 95, 0)
 
-    # NOTE: The following line may cause the following warning (should not 
-    # crash or invalidate results) when one of the inputs does not change with 
+    # NOTE: The following line may cause the following warning (should not
+    # crash or invalidate results) when one of the inputs does not change with
     # respect to the other(s), causing an internal divide-by-zero error,
     #
@@ -487,5 +487,5 @@
             fline = fidi.readline()
             icdf = 0
-            while (fline != '' and not fline.isspace()) and not strncmpi(fline, 'Cumulative Distribution Function', 32):
+            while (fline != '' and not fline.isspace()) and not fline.startswith('Cumulative Distribution Function'):
                 [ntokens, tokens] = fltokens(fline)
                 icdf = icdf + 1
@@ -546,5 +546,5 @@
             fline = fidi.readline()
             ipdf = 0
-            while (fline != '' and not fline.isspace()) and not strncmpi(fline, 'PDF for', 7):
+            while (fline != '' and not fline.isspace()) and not fline.startswith('PDF for'):
                 [ntokens, tokens] = fltokens(fline)
                 ipdf = ipdf + 1
@@ -624,5 +624,5 @@
     ndresp = 0
 
-    while (fline != '' and not fline.isspace()) and strncmpi(fline, 'MV Statistics for ', 18):
+    while fline.startswith('MV Statistics for '):
 
         #  add new response function and moments
@@ -649,5 +649,5 @@
         dresp[-1].sens = []
 
-        while (fline != '' and not fline.isspace()) and strncmpi(fline, '  Importance Factor for variable ', 33):
+        while 'Importance Factor for variable' in fline:
             [ntokens, tokens] = fltokens(fline)
             idvar = idvar + 1
@@ -667,5 +667,5 @@
             dresp[-1].impfac = []
             dresp[-1].sens = []
-            while type(fline) == str and (fline != '' and not fline.isspace()) and not strncmpi(fline, 'Cumulative Distribution Function', 32) and not strncmpi(fline, 'MV Statistics for ', 18) and not strncmp(fline, ' - ', 1):
+            while type(fline) == str and (fline != '' and not fline.isspace()) and 'Cumulative Distribution Function' not in fline and 'MV Statistics for ' not in fline and '---' not in fline:
                 fline = fidi.readline()
 
@@ -682,5 +682,5 @@
                     fline = fidi.readline()
 
-        while (fline != '' and not fline.isspace()) and strncmpi(fline, 'Cumulative Distribution Function', 32):
+        while fline.startswith('Cumulative Distribution Function'):
             [ntokens, tokens] = fltokens(fline)
 
@@ -705,5 +705,5 @@
             #  read and add cdf table to response function
             fline = fidi.readline()
-            while (fline != '' and not fline.isspace()) and not strncmpi(fline, 'MV Statistics for ', 18) and not strncmp(fline, ' - ', 1):
+            while (fline != '' and not fline.isspace()) and not fline.startswith('MV Statistics for ') and '---' not in fline:
                 [ntokens, tokens] = fltokens(fline)
                 icdf = icdf + 1
@@ -724,5 +724,5 @@
             print('    Cumulative Distribution Function not available')
             dresp[ndresp].cdf = []
-            while (fline != '' and not fline.isspace()) and not strncmpi(fline, 'MV Statistics for ', 18) and not strncmp(fline, ' - ', 1):
+            while (fline != '' and not fline.isspace()) and not fline.startswith('MV Statistics for ') and '---' not in fline:
                 fline = fidi.readline()
 
@@ -737,5 +737,5 @@
 
     if fline is None or fline == '' or fline.isspace():
-        fline = findline(fidi, ' < < < < < Best ')
+        fline = findline(fidi, '<<<<< Best ')
         if fline is None:
             return
@@ -747,10 +747,10 @@
     print('Reading values for best function evaluation:')
 
-    while (fline != '' and not fline.isspace()) and strncmpi(fline, ' < < < < < Best ', 11):
+    while fline.startswith('<<<<< Best '):
         [ntokens, tokens] = fltokens(fline)
 
     #  read and add best parameter(s)
 
-        if strncmpi(str(tokens[2]), 'parameter', 9):
+        if str(tokens[2]).startswith('parameter'):
             print('  ' + fline)
 
@@ -759,5 +759,5 @@
             dresp.best.descriptor = ''
 
-            while (fline != '' and not fline.isspace()) and not strncmpi(fline, ' < < < < < Best ', 11):
+            while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
                 [ntokens, tokens] = fltokens(fline)
                 dresp.best.param.append([0])
@@ -767,5 +767,5 @@
 
         #  read and add best objective function(s)
-        elif strncmpi(str(tokens[2]), 'objective', 9) and strncmpi(str(tokens[3]), 'function', 8):
+        elif str(tokens[2]).startswith('objective') and str(tokens[3]).startswith('function'):
             print('  ' + fline)
 
@@ -773,5 +773,5 @@
             dresp.best.of = []
 
-            while (fline != '' and not fline.isspace()) and not strncmpi(fline, ' < < < < < Best ', 11):
+            while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
                 [ntokens, tokens] = fltokens(fline)
                 dresp.best.of.append(0)
@@ -780,5 +780,5 @@
 
         #  read and add best residual norms
-        elif strncmpi(str(tokens[2]), 'residual', 8) and strncmpi(str(tokens[3]), 'norm', 4):
+        elif str(tokens[2]).startswith('residual') and str(tokens[3]).startswith('norm'):
             print('  ' + fline)
             dresp.best.norm = tokens[5]
@@ -787,9 +787,9 @@
             fline = fidi.readline()
 
-            while (fline != '' and not fline.isspace()) and not strncmpi(fline, ' < < < < < Best ', 11):
+            while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
                 fline = fidi.readline()
 
         #  read and add best residual term(s)
-        elif strncmpi(str(tokens[2]), 'residual', 8) and strncmpi(str(tokens[3]), 'term', 4):
+        elif str(tokens[2]).startswith('residual') and str(tokens[3]).startswith('term'):
             print('  ' + fline)
 
@@ -797,5 +797,5 @@
             dresp.best.res = []
 
-            while (fline != '' and not fline.isspace()) and not strncmpi(fline, '<<<<<Best ', 11):
+            while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
                 [ntokens, tokens] = fltokens(fline)
                 dresp.best.res.append(0)
@@ -804,5 +804,5 @@
 
         #  read and add best constraint value(s)
-        elif strncmpi(str(tokens[2]), 'constraint', 10) and strncmpi(str(tokens[3]), 'value', 5):
+        elif str(tokens[2]).startswith('constraint') and str(tokens[3]).startswith('value'):
             print('  ' + fline)
 
@@ -810,5 +810,5 @@
             dresp.best.nc = []
 
-            while (fline != '' and not fline.isspace()) and not strncmpi(fline, '<<<<<Best ', 11):
+            while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
                 [ntokens, tokens] = fltokens(fline)
                 dresp.best.nc.append(0)
@@ -817,5 +817,5 @@
 
         #  read and add best data captured
-        elif strncmpi(str(tokens[2]), 'data', 4) and strncmpi(str(tokens[3]), 'captured', 8):
+        elif str(tokens[2]).startswith('data') and str(tokens[3]).startswith('captured'):
             print('  ' + fline)
             dresp.best.eval = tokens[7]
@@ -823,5 +823,5 @@
             fline = fidi.readline()
 
-            while (fline != '' and not fline.isspace()) and not strncmpi(fline, ' < < < < < Best ', 11):
+            while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
                 fline = fidi.readline()
 
@@ -832,5 +832,5 @@
             fline = fidi.readline()
 
-            while (fline != '' and not fline.isspace()) and not strncmpi(fline, ' < < < < < Best ', 11):
+            while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
                 fline = fidi.readline()
 
@@ -903,5 +903,5 @@
     for fline in fidi:
         npos += len(fline)
-        if (strncmpi(fline, string, len(string))):
+        if fline.startswith(string):
             if goto_line:
                 fidi.seek(npos, 0)
Index: /issm/trunk-jpl/src/m/solve/parseresultsfromdisk.py
===================================================================
--- /issm/trunk-jpl/src/m/solve/parseresultsfromdisk.py	(revision 26182)
+++ /issm/trunk-jpl/src/m/solve/parseresultsfromdisk.py	(revision 26183)
@@ -3,4 +3,5 @@
 import numpy as np
 from results import solution
+from netCDF4 import Dataset
 
 
@@ -75,11 +76,12 @@
 # }}}
 
+
 def parseresultsfromdiskioserial(md, filename):  # {{{
     # Open file
     try:
         fid = open(filename, 'rb')
-    except IOError as e:
+    except IOError:
         raise IOError("parseresultsfromdisk error message: could not open {} for binary reading".format(filename))
-
+    print(md.results)
     # Collect all results in a list
     allresults = []
@@ -87,5 +89,4 @@
         # Read next result
         result = ReadData(fid, md)
-
         if result is None:
             if allresults == []:
@@ -93,4 +94,7 @@
             else:
                 break
+        # print("now got {} for time {}".format(result['fieldname'], result['time']))
+        # print("with keys {}".format(result.keys()))
+        # print("==============================")
 
         allresults.append(result)
@@ -119,4 +123,72 @@
     return results
 # }}}
+
+
+def parseresultsfromdisktonc(md, filin, filout):  # {{{
+    verbose = 0
+    # Open file
+    try:
+        fid = open(filin, 'rb')
+    except IOError:
+        raise IOError("parseresultsfromdisk error message: could not open {} for binary reading".format(filin))
+
+    NCData = Dataset(filout, 'w', format='NETCDF4')
+    UnlimDim = NCData.createDimension('Unlim', None)
+    DimDict = {len(UnlimDim): 'Inf'}
+    dimindex = 2
+    dimlist = [2, md.mesh.numberofelements, md.mesh.numberofvertices, np.shape(md.mesh.elements)[1]]
+    dimnames = ['DictDummy', 'EltNum', 'VertNum', 'VertPerElt']
+    if verbose > 0:
+        print('===Creating dimensions ===')
+    for i, newdim in enumerate(dimlist):
+        if newdim not in list(DimDict.keys()):
+            dimindex += 1
+            NewDim = NCData.createDimension(dimnames[i], newdim)
+            DimDict[len(NewDim)] = dimnames[i]
+
+    typelist = [bool, str, int, float, complex,
+                collections.OrderedDict,
+                np.int64, np.ndarray, np.float64]
+
+    # Collect all results in a list
+    allresults = []
+    while True:
+        # Read next result
+        result = ReadData(fid, md)
+        if result is None:
+            if allresults == []:
+                raise Exception('no results found in binary file ' + filin)
+            else:
+                break
+        print("now got {} for time {}".format(result['fieldname'], result['time']))
+        print("with keys {}".format(result.keys()))
+        print("==============================")
+
+        allresults.append(result)
+    fid.close()
+
+    # Now, process all results and find out how many steps we have
+    numresults = len(allresults)
+    allsteps = np.zeros((numresults, 1))
+    for i in range(numresults):
+        allsteps[i] = allresults[i]['step']
+    pos = np.where(allsteps != -9999)
+    allsteps = np.sort(np.unique(allsteps[pos]))
+
+    # Ok, now construct structure
+    results = solution()
+
+    for i in range(numresults):
+        result = allresults[i]
+        index = 0
+        if result['step'] != -9999:
+            index = np.where(result['step'] == allsteps)[0][0]
+            setattr(results[index], 'step', result['step'])
+        if result['time'] != -9999:
+            setattr(results[index], 'time', result['time'])
+        setattr(results[index], result['fieldname'], result['field'])
+    return results
+# }}}
+
 
 def ReadData(fid, md):  # {{{
@@ -321,4 +393,5 @@
 # }}}
 
+
 def addfieldtorecord(a, descr): #{{{
     if a.dtype.fields is None:
