Index: /issm/trunk-jpl/src/m/classes/clusters/pace.m
===================================================================
--- /issm/trunk-jpl/src/m/classes/clusters/pace.m	(revision 27918)
+++ /issm/trunk-jpl/src/m/classes/clusters/pace.m	(revision 27919)
@@ -9,5 +9,5 @@
 	properties (SetAccess=public)
 	% {{{
-		name            = 'login-phoenix-4.pace.gatech.edu' %Phoenix cluster name
+		name            = 'login-phoenix-slurm.pace.gatech.edu' %Phoenix cluster name
 		login           = ''; %personal login
 		numnodes        = 1; %number of nodes requested
@@ -17,5 +17,5 @@
 		queue           = 'inferno'; %queue
 		time            = 60; %time requested per run [minutes]
-		accountname     = 'GT-arobel3-atlas'; %group account name
+		accountname     = 'gts-arobel3-atlas'; %group account name
 		codepath        = ''; %path to issm binaries
 		executionpath   = ''; %path for execution folder
@@ -62,17 +62,20 @@
 			fid=fopen([modelname '.queue'],'w');
 			fprintf(fid,'#!/bin/sh\n');
-			fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %conversion of walltime from minutes to seconds.
-			fprintf(fid,'#PBS -N %s\n',modelname);
-			fprintf(fid,'#PBS -l nodes=1:ppn=%i\n',cluster.np);
-			fprintf(fid,'#PBS -l pmem=%igb\n',cluster.mem);
-			fprintf(fid,'#PBS -q %s\n',cluster.queue);
-			fprintf(fid,'#PBS -A %s\n',cluster.accountname);
-		
-			fprintf(fid,'#PBS -o %s/%s/%s.outlog \n',cluster.executionpath,dirname,modelname);
-         fprintf(fid,'#PBS -e %s/%s/%s.errlog \n\n',cluster.executionpath,dirname,modelname);
 
-			fprintf(fid,'export PBS_O_WORKDIR=%s\n',[cluster.executionpath '/' dirname]);
-			fprintf(fid,'cd $PBS_O_WORKDIR\n');
-			fprintf(fid,'mpiexec -np %i %s/%s %s %s %s \n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname);	
+			fprintf(fid,'#SBATCH -t %i\n',cluster.time); %walltime is in minutes; sbatch reads a bare integer as minutes, so no conversion is needed
+			fprintf(fid,'#SBATCH -J %s\n',modelname);
+			fprintf(fid,'#SBATCH -N 1 --ntasks-per-node=%i\n',cluster.np);
+			%fprintf(fid,'#SBATCH -N %i\n',cluster.numnodes);
+			%fprintf(fid,'#SBATCH --ntasks=1\n');
+			%fprintf(fid,'#SBATCH --cpus-per-task=%i\n',cluster.np);
+			fprintf(fid,'#SBATCH --mem-per-cpu=%iG\n',cluster.mem);
+			fprintf(fid,'#SBATCH -p %s\n',cluster.queue);
+			fprintf(fid,'#SBATCH -A %s\n',cluster.accountname);
+			fprintf(fid,'#SBATCH -o %s/%s/%s.outlog\n',cluster.executionpath,dirname,modelname);
+			fprintf(fid,'#SBATCH -e %s/%s/%s.errlog\n\n',cluster.executionpath,dirname,modelname);
+			fprintf(fid,'export SLURM_SUBMIT_DIR=%s\n',[cluster.executionpath '/' dirname]);
+			fprintf(fid,'cd $SLURM_SUBMIT_DIR\n');
+			fprintf(fid,'export LD_LIBRARY_PATH=/opt/slurm/current/lib:/opt/pmix/current/lib:$LD_LIBRARY_PATH\n');
+			fprintf(fid,'srun --mpi=pmi2 -n %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname);
 
 			fclose(fid);
@@ -97,8 +100,8 @@
 			disp('launching solution sequence on remote cluster');
 			if ~isempty(restart)
-				launchcommand=['cd ' cluster.executionpath ' && cd ' dirname ' && qsub ' modelname '.queue '];
+				launchcommand=['cd ' cluster.executionpath ' && cd ' dirname ' && sbatch ' modelname '.queue '];
 			else
 				launchcommand=['cd ' cluster.executionpath ' && rm -rf ./' dirname ' && mkdir ' dirname ...
-					' && cd ' dirname ' && mv ../' dirname '.tar.gz ./ && tar -zxf ' dirname '.tar.gz  && qsub ' modelname '.queue '];
+					' && cd ' dirname ' && mv ../' dirname '.tar.gz ./ && tar -zxf ' dirname '.tar.gz  && sbatch ' modelname '.queue '];
 			end
 			issmssh(cluster.name,cluster.login,cluster.port,launchcommand);

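For context, a minimal usage sketch of the updated class follows. The class declares its properties with SetAccess=public, so they can be set directly on a pace instance; the login, paths, and numeric values below are hypothetical placeholders, and the sketch assumes a no-argument constructor, as ISSM cluster classes typically provide:

cluster=pace();                            %assumes a no-argument constructor with defaults (hypothetical)
cluster.login='gburdell3';                 %hypothetical PACE login
cluster.np=8;                              %MPI tasks on one node (written to --ntasks-per-node)
cluster.mem=4;                             %memory per CPU [GB] (written to --mem-per-cpu)
cluster.time=120;                          %walltime [minutes] (written to #SBATCH -t)
cluster.queue='inferno';                   %partition (written to #SBATCH -p)
cluster.accountname='gts-arobel3-atlas';   %charge account (written to #SBATCH -A)
cluster.codepath='/path/to/issm/bin';      %hypothetical path to ISSM binaries
cluster.executionpath='/path/to/exec';     %hypothetical execution folder
md.cluster=cluster;                        %attach to an ISSM model md before calling solve()

With these settings, the generated queue script requests one node with eight tasks on the inferno partition, and the job is now submitted with sbatch rather than qsub; its status can be checked on the login node with standard SLURM tools such as squeue.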