Index: ../trunk-jpl/src/m/classes/clusters/pfe.m
===================================================================
--- ../trunk-jpl/src/m/classes/clusters/pfe.m	(revision 18789)
+++ ../trunk-jpl/src/m/classes/clusters/pfe.m	(revision 18790)
@@ -214,16 +214,13 @@
 			fclose(fid);
 		end
 	end %}}}
-	function BuildOceanQueueScript(cluster,modelname,solution,io_gather,isvalgrind,isgprof) % {{{
+	function BuildOceanQueueScript(np,cluster,modelname) % {{{
 
-		%compute number of processors
-		cluster.np=cluster.numnodes*cluster.cpuspernode;
-
 		%write queuing script
 		fid=fopen([modelname '.queue'],'w');
 		fprintf(fid,'#PBS -S /bin/bash\n');
-		fprintf(fid,'#PBS -l select=%i:ncpus=%i:model=%s\n',cluster.numnodes,cluster.cpuspernode,cluster.processor);
-		fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds.
+		fprintf(fid,'#PBS -l select=1:ncpus=%i:model=%s\n',np,cluster.processor);
+		fprintf(fid,'#PBS -l walltime=%i\n',cluster.time); %walltime is in seconds.
 		fprintf(fid,'#PBS -q %s \n',cluster.queue);
 		fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist);
 		fprintf(fid,'#PBS -m e\n');
@@ -237,8 +234,8 @@
 		fprintf(fid,'module load gcc/4.4.4\n');
 		fprintf(fid,'export PATH="$PATH:."\n');
 		fprintf(fid,'export MPI_GROUP_MAX=64\n\n');
-		fprintf(fid,'cd %s/%s/\n\n',cluster.executionpath,modelname);
-		fprintf(fid,'mpiexec -np %i ./mitgcm\n',cluster.np);
+		fprintf(fid,['cd ' pwd() ' \n\n']);
+		fprintf(fid,'mpiexec -np %i ./mitgcmuv\n',np);
 		% if ~io_gather, %concatenate the output files:
 		% 	fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);
 		% end
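
Note: with this revision, BuildOceanQueueScript takes the processor count np as an explicit argument, requests a single Pleiades node (select=1) with np CPUs, writes cluster.time to the walltime directive verbatim, launches the job from the current working directory (pwd() at script-generation time), and runs the mitgcmuv executable. The lines below are a minimal usage sketch, not part of the revision: the pfe property names come from the diff, but the particular values, the model name 'runme', and the default pfe() construction are assumptions for illustration only.

    % Hypothetical sketch: build a MITgcm queue script for 8 MPI ranks on one node.
    cluster           = pfe();      % assumed default construction of the pfe cluster class
    cluster.processor = 'ivy';      % node model written to "#PBS -l select" (illustrative)
    cluster.time      = 3600;       % written verbatim to "#PBS -l walltime" (illustrative)
    cluster.queue     = 'devel';    % illustrative queue name
    cluster.grouplist = 's1690';    % illustrative group list
    np                = 8;          % number of CPUs/MPI ranks on the single node

    BuildOceanQueueScript(np,cluster,'runme');   % writes ./runme.queue in pwd()
    % The generated script would then contain, among other lines:
    %   #PBS -l select=1:ncpus=8:model=ivy
    %   #PBS -l walltime=3600
    %   mpiexec -np 8 ./mitgcmuv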