Index: /issm/trunk-jpl/src/m/classes/clusters/hexagon.py
===================================================================
--- /issm/trunk-jpl/src/m/classes/clusters/hexagon.py	(revision 26184)
+++ 	(revision )
@@ -1,145 +1,0 @@
-import subprocess
-from fielddisplay import fielddisplay
-from pairoptions import pairoptions
-from issmssh import issmssh
-from issmscpin import issmscpin
-from issmscpout import issmscpout
-from IssmConfig import IssmConfig
-import datetime
-try:
-    from hexagon_settings import hexagon_settings
-except ImportError:
-    print('You need hexagon_settings.py to proceed, check presence and sys.path')
-
-
-class hexagon(object):
-    """
-    Hexagon cluster class definition
-    Hexagon have nodes built of 2 * 16 CPUs. Nodes are dedicated to one job so the best usage is to use 32 procs per nodes (16 per cores) as it is what is billed anyway.
-    You can reduce this number if you run out of memory as the total node memory is divided by the number of procs
-       Usage:
-          cluster = hexagon()
-    """
-
-    def __init__(self, *args):  # {{{
-        self.name = 'hexagon'
-        self.login = ''
-        self.numnodes = 2
-        self.procspernodes = 32
-        self.mem = 32000
-        self.queue = 'batch'
-        self.time = 2 * 60
-        self.codepath = ''
-        self.executionpath = ''
-        self.interactive = 0
-        self.port = []
-        self.accountname = ''
-
-    #use provided options to change fields
-        options = pairoptions(*args)
-
-    #initialize cluster using user settings if provided
-        self = hexagon_settings(self)
-
-    #OK get other fields
-        self = options.AssignObjectFields(self)
-        self.np = self.numnodes * self.procspernodes
-    # }}}
-
-    def __repr__(self):      # {{{
-        #  display the object
-        s = "class hexagon object:"
-        s = "%s\n%s" % (s, fielddisplay(self, 'name', 'name of the cluster'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'login', 'login'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'numnodes', 'number of nodes'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'procspernodes', 'number of mpi procs per nodes  default and optimal is 32'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'mem', 'Total node memory'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'queue', 'name of the queue'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'time', 'walltime requested in minutes'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'codepath', 'code path on the cluster'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'executionpath', 'execution path on the cluster'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'interactive', ''))
-        s = "%s\n%s" % (s, fielddisplay(self, 'accountname', 'your cluster account'))
-        return s
-    # }}}
-
-    def checkconsistency(self, md, solution, analyses):      # {{{
-        #mem should not be over 32000mb
-        #numprocs should not be over 4096
-        #we have cpupernodes * numberofcpus = mppwidth and mppnppn = cpupernodes,
-        #Miscelaneous
-        if not self.login:
-            md = md.checkmessage('login empty')
-        if not self.codepath:
-            md = md.checkmessage('codepath empty')
-        if not self.executionpath:
-            md = md.checkmessage('executionpath empty')
-        if self.interactive == 1:
-            md = md.checkmessage('interactive mode not implemented')
-        if self.mem > 32000:
-            md = md.checkmessage('asking too much memory max is 32000 per node')
-        return self
-    # }}}
-
-    def BuildQueueScript(self, dirname, modelname, solution, io_gather, isvalgrind, isgprof, isdakota, isoceancoupling):    # {{{
-        executable = 'issm.exe'
-        if isdakota:
-            version = IssmConfig('_DAKOTA_VERSION_')[0:2]
-            version = float(version)
-            if version >= 6:
-                executable = 'issm_dakota.exe'
-        if isoceancoupling:
-            executable = 'issm_ocean.exe'
-
-    #write queuing script
-        shortname = modelname[0:min(12, len(modelname))]
-        fid = open(modelname + '.queue', 'w')
-        fid.write('  #!/bin/bash\n')
-        fid.write('  #PBS - N %s \n' % shortname)
-        fid.write('  #PBS - l mppwidth=%i, mppnppn=%i\n' % (self.np, self.procspernodes))
-        timeobj = datetime.timedelta(minutes=self.time)
-        m, s = divmod(timeobj.total_seconds(), 60)
-        h, m = divmod(m, 60)
-        timestring = "%02d:%02d:%02d" % (h, m, s)
-        fid.write('#PBS -l walltime=%s\n' % timestring)  #walltime is hh:mm:ss
-        fid.write('#PBS -l mppmem=%imb\n' % int(self.mem / self.procspernodes))
-        fid.write('#PBS -A %s\n' % self.accountname)
-        fid.write('#PBS -o %s/%s/%s.outlog \n' % (self.executionpath, dirname, modelname))
-        fid.write('#PBS -e %s/%s/%s.errlog \n\n' % (self.executionpath, dirname, modelname))
-        fid.write('export ISSM_DIR="%s/../"\n' % self.codepath)
-        fid.write('export CRAY_ROOTFS=DSL\n')
-        fid.write('module swap PrgEnv-cray / 5.2.40 PrgEnv - gnu\n')
-        fid.write('module load cray-petsc\n')
-        fid.write('module load cray-tpsl\n')
-        fid.write('module load cray-mpich\n')
-        fid.write('module load gsl\n')
-        fid.write('cd %s/%s/\n\n' % (self.executionpath, dirname))
-        fid.write('aprun -B %s/%s %s %s/%s %s\n' % (self.codepath, executable, str(solution), self.executionpath, dirname, modelname))
-        fid.close()
-    # }}}
-
-    def UploadQueueJob(self, modelname, dirname, filelist):    # {{{
-        #compress the files into one zip.
-        compressstring = 'tar -zcf %s.tar.gz ' % dirname
-        for file in filelist:
-            compressstring += ' %s' % file
-        subprocess.call(compressstring, shell=True)
-
-        print('uploading input file and queueing script')
-        issmscpout(self.name, self.executionpath, self.login, self.port, [dirname + '.tar.gz'])
-    # }}}
-
-    def LaunchQueueJob(self, modelname, dirname, filelist, restart, batch):    # {{{
-        print('launching solution sequence on remote cluster')
-        if restart:
-            launchcommand = 'cd %s && cd %s && qsub %s.queue' % (self.executionpath, dirname, modelname)
-        else:
-            launchcommand = 'cd %s && rm -rf ./%s && mkdir %s && cd %s && mv ../%s.tar.gz ./ && tar -zxf %s.tar.gz  && qsub %s.queue' % (self.executionpath, dirname, dirname, dirname, dirname, dirname, modelname)
-        issmssh(self.name, self.login, self.port, launchcommand)
-    # }}}
-
-    def Download(self, dirname, filelist):    # {{{
-        #copy files from cluster to current directory
-        directory = '%s/%s/' % (self.executionpath, dirname)
-        issmscpin(self.name, self.login, self.port, directory, filelist)
-    # }}}
Index: /issm/trunk-jpl/src/m/classes/clusters/vilje.m
===================================================================
--- /issm/trunk-jpl/src/m/classes/clusters/vilje.m	(revision 26184)
+++ 	(revision )
@@ -1,209 +1,0 @@
-%vilje class definition
-%
-%   Usage:
-%      cluster=greenplanet();
-%      cluster=greenplanet('np',3);
-%      cluster=greenplanet('np',3,'login','username');
-
-classdef vilje
-	properties (SetAccess=public)  
-		% {{{
-		name           = 'vilje';
-		login          = '';
-		numnodes       = 2;
-		cpuspernode    = 32;
-		procspernodes  = 16;
-		mem            = 28;
-		numstreams		= 8; %Henning added
-		queue          = 'workq';
-		time           = 2*60;
-		codepath       = '';
-		executionpath  = '';
-		interactive    = 0;
-		port           = [];
-		accountname    = '';
-		% }}}
-	end
-	methods
-		function cluster=vilje(varargin) % {{{
-
-			%initialize cluster using default settings if provided
-			if (exist('vilje_settings')==2), vilje_settings; end
-
-			%use provided options to change fields
-			cluster=AssignObjectFields(pairoptions(varargin{:}),cluster);
-		end
-		%}}}
-		function disp(cluster) % {{{
-			%  display the object
-			disp(sprintf('class ''%s'' object ''%s'' = ',class(cluster),inputname(1)));
-			disp(sprintf('    name: %s',cluster.name));
-			disp(sprintf('    login: %s',cluster.login));
-			disp(sprintf('    accountname: %s',cluster.accountname));
-			disp(sprintf('    numnodes: %i',cluster.numnodes));
-			disp(sprintf('    cpuspernode: %i',cluster.cpuspernode));
-			disp(sprintf('    np: %i', cluster.cpuspernode*cluster.numnodes));
-			disp(sprintf('    procspernodes: %i',cluster.procspernodes));
-			disp(sprintf('    queue: %s',cluster.queue));
-			disp(sprintf('    codepath: %s',cluster.codepath));
-			disp(sprintf('    executionpath: %s',cluster.executionpath));
-			disp(sprintf('    interactive: %i',cluster.interactive));
-			disp(sprintf('    time: %i',cluster.time));
-			disp(sprintf('    memory: %i',cluster.mem));
-		end
-		%}}}
-		function md = checkconsistency(cluster,md,solution,analyses) % {{{
-
-			available_queues={'workq'};
-			queue_requirements_time=[5*24*60];
-			queue_requirements_np=[30];
-
-			QueueRequirements(available_queues,queue_requirements_time,queue_requirements_np,cluster.queue,cluster.np,1)
-
-			%Miscelaneous
-			if isempty(cluster.login), md = checkmessage(md,'login empty'); end
-			if isempty(cluster.accountname), md = checkmessage(md,'accountname empty'); end
-			if isempty(cluster.codepath), md = checkmessage(md,'codepath empty'); end
-			if isempty(cluster.executionpath), md = checkmessage(md,'executionpath empty'); end
-
-		end
-		%}}}
-		function numprocs=np(self) % {{{
-			%compute number of processors
-			numprocs=self.numnodes*self.procspernodes;
-		end
-		%}}}
-		function BuildKrigingQueueScript(cluster,modelname,solution,io_gather,isvalgrind,isgprof) % {{{
-
-			if(isvalgrind), disp('valgrind not supported by cluster, ignoring...'); end
-			if(isgprof),    disp('gprof not supported by cluster, ignoring...'); end
-
-			%compute number of processors
-% 			cluster.np=cluster.numnodes*cluster.cpuspernode;
-			np(cluster);%=cluster.numnodes*cluster.cpuspernode;
-
-			%write queuing script 
-			fid=fopen([modelname '.queue'],'w');
-			fprintf(fid,'#PBS -S /bin/bash\n');
-			fprintf(fid,'#PBS -N %s\n',modelname);
-			fprintf(fid,'#PBS -l select=%i:ncpus=%i:mpiprocs=%i\n',cluster.numnodes,cluster.cpuspernode,16);
-			
-			%calculate walltime in hh:mm:ss format
-			walltime=datestr(cluster.time/(60*24),'HH:MM:SS');
-			fprintf(fid,'#PBS -l walltime=%s\n',walltime); %walltime should be in hh:mm:ss
-			fprintf(fid,'#PBS -A %s\n',cluster.accountname);
-			fprintf(fid,'#PBS -o %s.outlog \n',modelname);
-			fprintf(fid,'#PBS -e %s.errlog \n\n',modelname);
-			fprintf(fid,'export ISSM_DIR="%s/../"\n',cluster.codepath); %FIXME
-			fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n');       %FIXME
-			fprintf(fid,'module load intelcomp/17.0.0\n') 
-			fprintf(fid,'module load mpt/2.14\n')
-			fprintf(fid,'module load petsc/3.7.4d\n')
-			fprintf(fid,'module load parmetis/4.0.3\n') 
-			fprintf(fid,'module load mumps/5.0.2\n')
-			fprintf(fid,'cd %s/%s\n\n',cluster.executionpath,modelname);
-			fprintf(fid,'mpiexec_mpt -n %i %s/kriging.exe %s %s\n',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname);
-			if ~io_gather, %concatenate the output files:
-				fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);
-			end
-			fclose(fid);
-		end
-		%}}}
-		function BuildQueueScript(cluster,dirname,modelname,solution,io_gather,isvalgrind,isgprof,isdakota,isoceancoupling) % {{{
-
-			if(isvalgrind), disp('valgrind not supported by cluster, ignoring...'); end
-			if(isgprof),    disp('gprof not supported by cluster, ignoring...'); end
-
-			executable='issm.exe';
-			if isdakota,
-				version=IssmConfig('_DAKOTA_VERSION_'); version=str2num(version(1:3));
-				if (version>=6),
-					executable='issm_dakota.exe';
-				end
-			end
-			if isoceancoupling,
-				executable='issm_ocean.exe';
-			end
-
-			%compute number of processors
-% 			cluster.np=cluster.numnodes*cluster.cpuspernode;
-			np(cluster);%=cluster.numnodes*cluster.cpuspernode;                     
-% 			shortname = substring(modelname,1,min(12,length(modelname)));
-
-			%write queuing script 
-			fid=fopen([modelname '.queue'],'w');
-			fprintf(fid,'#PBS -S /bin/bash\n');
-% 			fprintf(fid,'#PBS -N %s\n',shortname);
-			fprintf(fid,'#PBS -N %s\n',modelname);
-			fprintf(fid,'#PBS -q %s \n',cluster.queue);
-			fprintf(fid,'#PBS -l select=%i:ncpus=%i:mpiprocs=%i\n',cluster.numnodes,cluster.cpuspernode,cluster.procspernodes);
-
-			%calculate walltime in hh:mm:ss format
-			walltime=datestr(cluster.time/(60*24),'HH:MM:SS');
-% 			fprintf(fid,'#PBS -l walltime=%s\n',duration(0,cluster.time,0)); %walltime is in minutes.
-% 			fprintf(fid,'#PBS -l walltime=%s\n',10); %walltime is in minutes.
-			fprintf(fid,'#PBS -l walltime=%s\n',walltime); %walltime should be in hh:mm:ss
-% 			fprintf(fid,'#PBS -l walltime=%i\n',walltime); %walltime is in hh:mm:ss
-			fprintf(fid,'#PBS -A %s\n',cluster.accountname);
-			fprintf(fid,'#PBS -o %s.outlog \n',[cluster.executionpath '/' dirname '/' modelname]);
-			fprintf(fid,'#PBS -e %s.errlog \n\n',[cluster.executionpath '/' dirname '/' modelname]);
-			fprintf(fid,'export ISSM_DIR="%s/../"\n',cluster.codepath); %FIXME
-			fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n');       %FIXME
-			fprintf(fid,'module load intelcomp/17.0.0\n') 
-			fprintf(fid,'module load mpt/2.14\n')
-			fprintf(fid,'module load petsc/3.7.4d\n')
-			fprintf(fid,'module load parmetis/4.0.3\n') 
-			fprintf(fid,'module load mumps/5.0.2\n')
-			fprintf(fid,'cd %s/%s\n\n',cluster.executionpath,dirname);
-			fprintf(fid,'mpiexec_mpt -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname);
-
-			if ~io_gather, %concatenate the output files:
-				fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);
-			end
-			fclose(fid);
-
-			%in interactive mode, create a run file, and errlog and outlog file
-			if cluster.interactive,
-				fid=fopen([modelname '.run'],'w');
-				fprintf(fid,'mpiexec_mpt -np %i %s/issm.exe %s %s %s\n',cluster.np,cluster.codepath,solution,[cluster.executionpath '/' dirname],modelname);
-				if ~io_gather, %concatenate the output files:
-					fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);
-				end
-				fclose(fid);
-				fid=fopen([modelname '.errlog'],'w');
-				fclose(fid);
-				fid=fopen([modelname '.outlog'],'w');
-				fclose(fid);
-			end
-		end %}}}
-		function UploadQueueJob(cluster,modelname,dirname,filelist)% {{{
-
-			%compress the files into one zip.
-			compressstring=['tar -zcf ' dirname '.tar.gz '];
-			for i=1:numel(filelist),
-				compressstring = [compressstring ' ' filelist{i}];
-			end
-			system(compressstring);
-			disp('uploading input file and queueing script');
-			directory=cluster.executionpath;
-% 			issmbbftpout(cluster.name,directory,cluster.login,cluster.port,cluster.numstreams,{[dirname '.tar.gz']});
-			issmscpout(cluster.name,directory,cluster.login,cluster.port,{[dirname '.tar.gz']});
-
-		end
-		%}}}
-		function LaunchQueueJob(cluster,modelname,dirname,filelist,restart,batch)% {{{
-
-			disp('launching solution sequence on remote cluster');
-			launchcommand=['cd ' cluster.executionpath ' && rm -rf ./' dirname ' && mkdir ' dirname ...
-				' && cd ' dirname ' && mv ../' dirname '.tar.gz ./ && tar -zxf ' dirname '.tar.gz  && hostname && qsub ' modelname '.queue '];
-			issmssh(cluster.name,cluster.login,cluster.port,launchcommand);
-		end %}}}
-		function Download(cluster,dirname,filelist)% {{{
-
-			%copy files from cluster to current directory
-			directory=[cluster.executionpath '/' dirname '/'];
-			issmscpin(cluster.name,cluster.login,cluster.port,directory,filelist);
-
-		end %}}}
-	end
-end
Index: /issm/trunk-jpl/src/m/classes/clusters/vilje.py
===================================================================
--- /issm/trunk-jpl/src/m/classes/clusters/vilje.py	(revision 26184)
+++ 	(revision )
@@ -1,145 +1,0 @@
-import subprocess
-from fielddisplay import fielddisplay
-from pairoptions import pairoptions
-from issmssh import issmssh
-from issmscpin import issmscpin
-from issmscpout import issmscpout
-from QueueRequirements import QueueRequirements
-from IssmConfig import IssmConfig
-import datetime
-try:
-    from vilje_settings import vilje_settings
-except ImportError:
-    print('You need vilje_settings.py to proceed, check presence and sys.path')
-
-
-class vilje(object):
-    """
-    Vilje cluster class definition
-
-       Usage:
-          cluster = vilje()
-    """
-
-    def __init__(self, *args):    # {{{
-        self.name = 'vilje'
-        self.login = ''
-        self.numnodes = 2
-        self.cpuspernode = 32
-        self.procspernodes = 16
-        self.mem = 28
-        self.queue = 'workq'
-        self.time = 2 * 60
-        self.codepath = ''
-        self.executionpath = ''
-        self.interactive = 0
-        self.port = []
-        self.accountname = ''
-
-    #use provided options to change fields
-        options = pairoptions(*args)
-
-    #initialize cluster using user settings if provided
-        self = vilje_settings(self)
-    #OK get other fields
-        self = options.AssignObjectFields(self)
-        self.np = self.numnodes * self.procspernodes
-    # }}}
-
-    def __repr__(self):    # {{{
-        #  display the object
-        s = "class vilje object:"
-        s = "%s\n%s" % (s, fielddisplay(self, 'name', 'name of the cluster'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'login', 'login'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'numnodes', 'number of nodes'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'cpuspernode', 'number of nodes per CPUs (32)'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'procspernodes', 'number of mpi procs per nodes'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'mem', 'node memory'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'queue', 'name of the queue (test is an option, workq the default)'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'time', 'walltime requested in minutes'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'codepath', 'code path on the cluster'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'executionpath', 'execution path on the cluster'))
-        s = "%s\n%s" % (s, fielddisplay(self, 'interactive', ''))
-        s = "%s\n%s" % (s, fielddisplay(self, 'accountname', 'your cluster account'))
-        return s
-    # }}}
-
-    def checkconsistency(self, md, solution, analyses):    # {{{
-        #Queue dictionarry  gives queu name as key and max walltime and cpus as var
-        queuedict = {'workq': [5 * 24 * 60, 30],
-                     'test': [30, 4]}
-        QueueRequirements(queuedict, self.queue, self.np, self.time)
-
-    #Miscelaneous
-        if not self.login:
-            md = md.checkmessage('login empty')
-        if not self.codepath:
-            md = md.checkmessage('codepath empty')
-        if not self.executionpath:
-            md = md.checkmessage('executionpath empty')
-        if self.interactive == 1:
-            md = md.checkmessage('interactive mode not implemented')
-        return self
-    # }}}
-
-    def BuildQueueScript(self, dirname, modelname, solution, io_gather, isvalgrind, isgprof, isdakota, isoceancoupling):    # {{{
-        executable = 'issm.exe'
-        if isdakota:
-            version = IssmConfig('_DAKOTA_VERSION_')[0:2]
-            version = float(version)
-            if version >= 6:
-                executable = 'issm_dakota.exe'
-        if isoceancoupling:
-            executable = 'issm_ocean.exe'
-
-    #write queuing script
-        shortname = modelname[0:min(12, len(modelname))]
-        fid = open(modelname + '.queue', 'w')
-        fid.write('#PBS -S / bin / bash\n')
-        fid.write('#PBS -N %s \n' % shortname)
-        fid.write('#PBS -q %s \n' % self.queue)
-        fid.write('#PBS -l select=%i:ncpus=%i:mpiprocs=%s\n' % (self.numnodes, self.cpuspernode, self.procspernodes))
-        timeobj = datetime.timedelta(minutes=self.time)
-        m, s = divmod(timeobj.total_seconds(), 60)
-        h, m = divmod(m, 60)
-        timestring = "%02d:%02d:%02d" % (h, m, s)
-        fid.write('#PBS -l walltime=%s\n' % timestring)  #walltime is hh:mm:ss
-        fid.write('#PBS -A %s\n' % self.accountname)
-        fid.write('#PBS -o %s/%s/%s.outlog \n' % (self.executionpath, dirname, modelname))
-        fid.write('#PBS -e %s/%s/%s.errlog \n\n' % (self.executionpath, dirname, modelname))
-        fid.write('export ISSM_DIR="%s/../ "\n' % self.codepath)
-        fid.write('module load intelcomp/17.0.0\n')
-        fid.write('module load mpt/2.14\n')
-        fid.write('module load petsc/3.7.4d\n')
-        fid.write('module load parmetis/4.0.3\n')
-        fid.write('module load mumps/5.0.2\n')
-        fid.write('cd %s/%s/\n\n' % (self.executionpath, dirname))
-        fid.write('mpiexec_mpt -np %i %s/%s %s %s/%s %s\n' % (self.np, self.codepath, executable, str(solution), self.executionpath, dirname, modelname))
-        fid.close()
-    # }}}
-
-    def UploadQueueJob(self, modelname, dirname, filelist):    # {{{
-        #compress the files into one zip.
-        compressstring = 'tar -zcf %s.tar.gz ' % dirname
-        for file in filelist:
-            compressstring += ' %s' % file
-        subprocess.call(compressstring, shell=True)
-
-        print('uploading input file and queueing script')
-        issmscpout(self.name, self.executionpath, self.login, self.port, [dirname + '.tar.gz'])
-    # }}}
-
-    def LaunchQueueJob(self, modelname, dirname, filelist, restart, batch):    # {{{
-        print('launching solution sequence on remote cluster')
-        if restart:
-            launchcommand = 'cd %s && cd %s && qsub %s.queue' % (self.executionpath, dirname, modelname)
-        else:
-            launchcommand = 'cd %s && rm -rf ./%s && mkdir %s && cd %s && mv ../%s.tar.gz ./ && tar -zxf %s.tar.gz  && qsub %s.queue' % (self.executionpath, dirname, dirname, dirname, dirname, dirname, modelname)
-        issmssh(self.name, self.login, self.port, launchcommand)
-    # }}}
-
-    def Download(self, dirname, filelist):    # {{{
-        #copy files from cluster to current directory
-        directory = '%s/%s/' % (self.executionpath, dirname)
-        issmscpin(self.name, self.login, self.port, directory, filelist)
-    # }}}
Index: /issm/trunk-jpl/src/m/classes/model.py
===================================================================
--- /issm/trunk-jpl/src/m/classes/model.py	(revision 26184)
+++ /issm/trunk-jpl/src/m/classes/model.py	(revision 26185)
@@ -38,8 +38,5 @@
 from generic import generic
 from pfe import pfe
-from vilje import vilje
-from hexagon import hexagon
 from cyclone import cyclone
-from stallo import stallo
 from saga import saga
 from balancethickness import balancethickness
@@ -185,5 +182,5 @@
     def __repr__(obj):  #{{{
         # TODO:
-        # - Convert all formatting to calls to <string>.format (see any 
+        # - Convert all formatting to calls to <string>.format (see any
         #   already converted <class>.__repr__ method for examples)
         #
