import struct

import numpy as np


def loadresultfromdisk(filename, step, name, *args):  # {{{
    """
    LOADRESULTFROMDISK - load specific result of solution sequence from disk
    file "filename"

    Usage:
        variable, fpos = loadresultfromdisk(filename, step, name)
        variable, fpos = loadresultfromdisk(filename, step, name, fpos)

    The optional fourth argument is a byte offset at which to resume reading.
    Returns a tuple (variable, fpos) where fpos is the offset at which the
    returned result record starts, so sequential reads can be chained.

    Raises:
        IOError: if the file cannot be opened for binary reading.
        RuntimeError: if no record matching (step, name) exists in the file.
        TypeError: if a record carries an unknown datatype code.

    TODO:
    - Test this module against output of src/m/solve/loadresultfromdisk.m
    """
    # Open file
    try:
        fid = open(filename, 'rb')
    except IOError as e:
        raise IOError("loadresultfromdisk error message: could not open {} for binary reading".format(filename))

    # NOTE: MATLAB's counterpart checks nargin==4, which counts ALL four
    # arguments; here *args holds only the extras, so one extra argument
    # means an explicit starting file position was supplied.
    if len(args) == 1:
        # Put the pointer on the right position in the file
        fpos = args[0]
        fid.seek(fpos)

    try:
        while True:
            # Remember where this record starts before reading its header
            fpos = fid.tell()
            header = fid.read(struct.calcsize('i'))
            if len(header) < struct.calcsize('i'):
                # Reached end of file without finding the requested result
                raise RuntimeError("loadresultfromdisk error message: could not find result {} at step {} in file {}".format(name, step, filename))
            length = struct.unpack('i', header)[0]

            fieldname = struct.unpack('{}s'.format(length), fid.read(length))[0][:-1]
            fieldname = fieldname.decode()  # strings are binaries when stored so need to be converted back
            rtime = struct.unpack('d', fid.read(struct.calcsize('d')))[0]
            rstep = struct.unpack('i', fid.read(struct.calcsize('i')))[0]

            if (rstep == step) and (fieldname == name):
                # ok, go read the result really
                datatype = struct.unpack('i', fid.read(struct.calcsize('i')))[0]
                M = struct.unpack('i', fid.read(struct.calcsize('i')))[0]
                if datatype == 1:
                    # vector of doubles
                    field = np.array(struct.unpack('{}d'.format(M), fid.read(M * struct.calcsize('d'))), dtype=float)
                elif datatype == 2:
                    # null-terminated string
                    field = struct.unpack('{}s'.format(M), fid.read(M))[0][:-1]
                    field = field.decode()
                elif datatype == 3:
                    # M x N matrix of doubles, stored row by row
                    N = struct.unpack('i', fid.read(struct.calcsize('i')))[0]
                    field = np.zeros(shape=(M, N), dtype=float)
                    for i in range(M):
                        field[i, :] = struct.unpack('{}d'.format(N), fid.read(N * struct.calcsize('d')))
                elif datatype == 4:
                    # M x N matrix of ints, stored row by row
                    N = struct.unpack('i', fid.read(struct.calcsize('i')))[0]
                    field = np.zeros(shape=(M, N), dtype=int)
                    for i in range(M):
                        field[i, :] = struct.unpack('{}i'.format(N), fid.read(N * struct.calcsize('i')))
                else:
                    raise TypeError("cannot read data of type {}".format(datatype))

                variable = field
                break
            else:
                # just skim to next result (seek past the payload without reading it)
                datatype = struct.unpack('i', fid.read(struct.calcsize('i')))[0]
                M = struct.unpack('i', fid.read(struct.calcsize('i')))[0]
                if datatype == 1:
                    fid.seek(M * 8, 1)
                elif datatype == 2:
                    fid.seek(M, 1)
                elif datatype == 3:
                    N = struct.unpack('i', fid.read(struct.calcsize('i')))[0]
                    fid.seek(N * M * 8, 1)
                elif datatype == 4:
                    N = struct.unpack('i', fid.read(struct.calcsize('i')))[0]
                    fid.seek(N * M * 4, 1)
                else:
                    raise TypeError("cannot read data of type {}".format(datatype))
    finally:
        # Close the handle even when a record is malformed or not found
        fid.close()

    return (variable, fpos)
# }}}
=================================================================== --- ../trunk-jpl/src/m/solve/loadresultsfromcluster.py (revision 25299) +++ ../trunk-jpl/src/m/solve/loadresultsfromcluster.py (revision 25300) @@ -1,8 +1,9 @@ import os +import platform import socket -import platform + +from helpers import * from loadresultsfromdisk import loadresultsfromdisk -from helpers import * def loadresultsfromcluster(md, runtimename=False): Index: ../trunk-jpl/src/m/solve/solve.py =================================================================== --- ../trunk-jpl/src/m/solve/solve.py (revision 25299) +++ ../trunk-jpl/src/m/solve/solve.py (revision 25300) @@ -136,7 +136,7 @@ filelist = [modelname + '.bin ', modelname + '.toolkits ', modelname + '.queue '] if md.qmu.isdakota: filelist.append(modelname + '.qmu.in') - + if not restart: cluster.UploadQueueJob(md.miscellaneous.name, md.private.runtimename, filelist) @@ -153,5 +153,4 @@ if md.verbose.solution: print('loading results from cluster') md = loadresultsfromcluster(md) - return md Index: ../trunk-jpl/src/m/classes/clusters/pfe.m =================================================================== --- ../trunk-jpl/src/m/classes/clusters/pfe.m (revision 25299) +++ ../trunk-jpl/src/m/classes/clusters/pfe.m (revision 25300) @@ -6,451 +6,451 @@ % cluster=pfe('np',3,'login','username'); classdef pfe - properties (SetAccess=public) - % {{{ - name = 'pfe' - login = ''; - modules = {'comp-intel/2016.2.181' 'mpi-sgi/mpt'}; - numnodes = 20; - cpuspernode = 8; - port = 1025; - queue = 'long'; - time = 12*60; - processor = 'ivy'; - srcpath = ''; - codepath = ''; - executionpath = ''; - grouplist = 's1690'; - interactive = 0; - bbftp = 0; - numstreams = 8; - hyperthreading = 0; - end - %}}} - methods - function cluster=pfe(varargin) % {{{ + properties (SetAccess=public) + % {{{ + name = 'pfe' + login = ''; + modules = {'comp-intel/2016.2.181' 'mpi-sgi/mpt'}; + numnodes = 20; + cpuspernode = 8; + port = 1025; + queue = 'long'; + time 
= 12*60; + processor = 'ivy'; + srcpath = ''; + codepath = ''; + executionpath = ''; + grouplist = 's1690'; + interactive = 0; + bbftp = 0; + numstreams = 8; + hyperthreading = 0; + end + %}}} + methods + function cluster=pfe(varargin) % {{{ - %initialize cluster using default settings if provided - if (exist('pfe_settings')==2), pfe_settings; end + %initialize cluster using default settings if provided + if (exist('pfe_settings')==2), pfe_settings; end - %use provided options to change fields - cluster=AssignObjectFields(pairoptions(varargin{:}),cluster); - end - %}}} - function disp(cluster) % {{{ - % display the object - disp(sprintf('class ''%s'' object ''%s'' = ',class(cluster),inputname(1))); - disp(sprintf(' name: %s',cluster.name)); - disp(sprintf(' login: %s',cluster.login)); - modules=''; for i=1:length(cluster.modules), modules=[modules cluster.modules{i} ',']; end; modules=modules(1:end-1); - disp(sprintf(' modules: %s',modules)); - disp(sprintf(' port: %i',cluster.port)); - disp(sprintf(' numnodes: %i',cluster.numnodes)); - disp(sprintf(' cpuspernode: %i',cluster.cpuspernode)); - disp(sprintf(' np: %i',cluster.cpuspernode*cluster.numnodes)); - disp(sprintf(' queue: %s',cluster.queue)); - disp(sprintf(' time: %i',cluster.time)); - disp(sprintf(' processor: %s',cluster.processor)); - disp(sprintf(' codepath: %s ($ISSM_DIR on pfe)',cluster.codepath)); - disp(sprintf(' executionpath: %s (directory containing issm.exe on pfe)',cluster.executionpath)); - disp(sprintf(' grouplist: %s',cluster.grouplist)); - disp(sprintf(' interactive: %i',cluster.interactive)); - disp(sprintf(' hyperthreading: %i',cluster.hyperthreading)); - end - %}}} - function numprocs=np(cluster) % {{{ - %compute number of processors - numprocs=cluster.numnodes*cluster.cpuspernode; - end - %}}} - function md = checkconsistency(cluster,md,solution,analyses) % {{{ + %use provided options to change fields + cluster=AssignObjectFields(pairoptions(varargin{:}),cluster); + end + %}}} + function 
disp(cluster) % {{{ + % display the object + disp(sprintf('class ''%s'' object ''%s'' = ',class(cluster),inputname(1))); + disp(sprintf(' name: %s',cluster.name)); + disp(sprintf(' login: %s',cluster.login)); + modules=''; for i=1:length(cluster.modules), modules=[modules cluster.modules{i} ',']; end; modules=modules(1:end-1); + disp(sprintf(' modules: %s',modules)); + disp(sprintf(' port: %i',cluster.port)); + disp(sprintf(' numnodes: %i',cluster.numnodes)); + disp(sprintf(' cpuspernode: %i',cluster.cpuspernode)); + disp(sprintf(' np: %i',cluster.cpuspernode*cluster.numnodes)); + disp(sprintf(' queue: %s',cluster.queue)); + disp(sprintf(' time: %i',cluster.time)); + disp(sprintf(' processor: %s',cluster.processor)); + disp(sprintf(' codepath: %s ($ISSM_DIR on pfe)',cluster.codepath)); + disp(sprintf(' executionpath: %s (directory containing issm.exe on pfe)',cluster.executionpath)); + disp(sprintf(' grouplist: %s',cluster.grouplist)); + disp(sprintf(' interactive: %i',cluster.interactive)); + disp(sprintf(' hyperthreading: %i',cluster.hyperthreading)); + end + %}}} + function numprocs=np(cluster) % {{{ + %compute number of processors + numprocs=cluster.numnodes*cluster.cpuspernode; + end + %}}} + function md = checkconsistency(cluster,md,solution,analyses) % {{{ - available_queues={'long','normal','debug','devel','alphatst@pbspl233'}; - queue_requirements_time=[5*24*60 8*60 2*60 2*60 24*60]; - queue_requirements_np=[2048 2048 150 150 2048]; + available_queues={'long','normal','debug','devel','alphatst@pbspl233'}; + queue_requirements_time=[5*24*60 8*60 2*60 2*60 24*60]; + queue_requirements_np=[2048 2048 150 150 2048]; - QueueRequirements(available_queues,queue_requirements_time,queue_requirements_np,cluster.queue,cluster.np,cluster.time) + QueueRequirements(available_queues,queue_requirements_time,queue_requirements_np,cluster.queue,cluster.np,cluster.time) - %now, check cluster.cpuspernode according to processor type - if strcmpi(cluster.processor,'wes'), - if 
cluster.hyperthreading, - if ((cluster.cpuspernode>24 ) | (cluster.cpuspernode<1)), - md = checkmessage(md,'cpuspernode should be between 1 and 24 for ''wes'' processors in hyperthreading mode'); - end - else - if ((cluster.cpuspernode>12 ) | (cluster.cpuspernode<1)), - md = checkmessage(md,'cpuspernode should be between 1 and 12 for ''wes'' processors'); - end - end - elseif strcmpi(cluster.processor,'ivy'), - if cluster.hyperthreading, - if ((cluster.cpuspernode>40 ) | (cluster.cpuspernode<1)), - md = checkmessage(md,'cpuspernode should be between 1 and 40 for ''ivy'' processors in hyperthreading mode'); - end - else - if ((cluster.cpuspernode>20 ) | (cluster.cpuspernode<1)), - md = checkmessage(md,'cpuspernode should be between 1 and 20 for ''ivy'' processors'); - end - end - elseif strcmpi(cluster.processor,'bro'), - if cluster.hyperthreading, - if ((cluster.cpuspernode>56 ) | (cluster.cpuspernode<1)), - md = checkmessage(md,'cpuspernode should be between 1 and 56 for ''bro'' processors in hyperthreading mode'); - end - else - if ((cluster.cpuspernode>28 ) | (cluster.cpuspernode<1)), - md = checkmessage(md,'cpuspernode should be between 1 and 28 for ''bro'' processors'); - end - end - elseif strcmpi(cluster.processor,'has'), - if cluster.hyperthreading, - if ((cluster.cpuspernode>48 ) | (cluster.cpuspernode<1)), - md = checkmessage(md,'cpuspernode should be between 1 and 48 for ''has'' processors in hyperthreading mode'); - end - else - if ((cluster.cpuspernode>24 ) | (cluster.cpuspernode<1)), - md = checkmessage(md,'cpuspernode should be between 1 and 24 for ''has'' processors'); - end - end - - elseif strcmpi(cluster.processor,'san'), - if cluster.hyperthreading, - if ((cluster.cpuspernode>32 ) | (cluster.cpuspernode<1)), - md = checkmessage(md,'cpuspernode should be between 1 and 32 for ''san'' processors in hyperthreading mode'); - end - else - if ((cluster.cpuspernode>16 ) | (cluster.cpuspernode<1)), - md = checkmessage(md,'cpuspernode should be between 1 
and 16 for ''san'' processors'); - end - end + %now, check cluster.cpuspernode according to processor type + if strcmpi(cluster.processor,'wes'), + if cluster.hyperthreading, + if ((cluster.cpuspernode>24 ) | (cluster.cpuspernode<1)), + md = checkmessage(md,'cpuspernode should be between 1 and 24 for ''wes'' processors in hyperthreading mode'); + end + else + if ((cluster.cpuspernode>12 ) | (cluster.cpuspernode<1)), + md = checkmessage(md,'cpuspernode should be between 1 and 12 for ''wes'' processors'); + end + end + elseif strcmpi(cluster.processor,'ivy'), + if cluster.hyperthreading, + if ((cluster.cpuspernode>40 ) | (cluster.cpuspernode<1)), + md = checkmessage(md,'cpuspernode should be between 1 and 40 for ''ivy'' processors in hyperthreading mode'); + end + else + if ((cluster.cpuspernode>20 ) | (cluster.cpuspernode<1)), + md = checkmessage(md,'cpuspernode should be between 1 and 20 for ''ivy'' processors'); + end + end + elseif strcmpi(cluster.processor,'bro'), + if cluster.hyperthreading, + if ((cluster.cpuspernode>56 ) | (cluster.cpuspernode<1)), + md = checkmessage(md,'cpuspernode should be between 1 and 56 for ''bro'' processors in hyperthreading mode'); + end + else + if ((cluster.cpuspernode>28 ) | (cluster.cpuspernode<1)), + md = checkmessage(md,'cpuspernode should be between 1 and 28 for ''bro'' processors'); + end + end + elseif strcmpi(cluster.processor,'has'), + if cluster.hyperthreading, + if ((cluster.cpuspernode>48 ) | (cluster.cpuspernode<1)), + md = checkmessage(md,'cpuspernode should be between 1 and 48 for ''has'' processors in hyperthreading mode'); + end + else + if ((cluster.cpuspernode>24 ) | (cluster.cpuspernode<1)), + md = checkmessage(md,'cpuspernode should be between 1 and 24 for ''has'' processors'); + end + end + + elseif strcmpi(cluster.processor,'san'), + if cluster.hyperthreading, + if ((cluster.cpuspernode>32 ) | (cluster.cpuspernode<1)), + md = checkmessage(md,'cpuspernode should be between 1 and 32 for ''san'' processors in 
hyperthreading mode'); + end + else + if ((cluster.cpuspernode>16 ) | (cluster.cpuspernode<1)), + md = checkmessage(md,'cpuspernode should be between 1 and 16 for ''san'' processors'); + end + end - else - md = checkmessage(md,'unknown processor type, should be ''wes'' or ''has'' or ''ivy'' or ''san'''); - end + else + md = checkmessage(md,'unknown processor type, should be ''wes'' or ''has'' or ''ivy'' or ''san'''); + end - %Miscelaneous - if isempty(cluster.login), md = checkmessage(md,'login empty'); end - if isempty(cluster.srcpath), md = checkmessage(md,'srcpath empty'); end - if isempty(cluster.codepath), md = checkmessage(md,'codepath empty'); end - if isempty(cluster.executionpath), md = checkmessage(md,'executionpath empty'); end - if isempty(cluster.grouplist), md = checkmessage(md,'grouplist empty'); end + %Miscellaneous + if isempty(cluster.login), md = checkmessage(md,'login empty'); end + if isempty(cluster.srcpath), md = checkmessage(md,'srcpath empty'); end + if isempty(cluster.codepath), md = checkmessage(md,'codepath empty'); end + if isempty(cluster.executionpath), md = checkmessage(md,'executionpath empty'); end + if isempty(cluster.grouplist), md = checkmessage(md,'grouplist empty'); end - end - %}}} - function BuildQueueScript(cluster,dirname,modelname,solution,io_gather,isvalgrind,isgprof,isdakota,isoceancoupling) % {{{ + end + %}}} + function BuildQueueScript(cluster,dirname,modelname,solution,io_gather,isvalgrind,isgprof,isdakota,isoceancoupling) % {{{ - if(isgprof), disp('gprof not supported by cluster, ignoring...'); end + if(isgprof), disp('gprof not supported by cluster, ignoring...'); end - executable='issm.exe'; - if isdakota, - version=IssmConfig('_DAKOTA_VERSION_'); version=str2num(version(1:3)); - if (version>=6), - executable='issm_dakota.exe'; - end - end - if isoceancoupling, - executable='issm_ocean.exe'; - end + executable='issm.exe'; + if isdakota, + version=IssmConfig('_DAKOTA_VERSION_'); version=str2num(version(1:3)); + if 
(version>=6), + executable='issm_dakota.exe'; + end + end + if isoceancoupling, + executable='issm_ocean.exe'; + end - %write queuing script - fid=fopen([modelname '.queue'],'w'); - fprintf(fid,'#PBS -S /bin/bash\n'); -% fprintf(fid,'#PBS -N %s\n',modelname); - fprintf(fid,'#PBS -l select=%i:ncpus=%i:model=%s\n',cluster.numnodes,cluster.cpuspernode,cluster.processor); - fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds. - fprintf(fid,'#PBS -q %s \n',cluster.queue); - fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist); - fprintf(fid,'#PBS -m e\n'); - fprintf(fid,'#PBS -o %s.outlog \n',[cluster.executionpath '/' dirname '/' modelname]); - fprintf(fid,'#PBS -e %s.errlog \n\n',[cluster.executionpath '/' dirname '/' modelname]); - fprintf(fid,'. /usr/share/modules/init/bash\n\n'); - for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end - fprintf(fid,'export PATH="$PATH:."\n\n'); - fprintf(fid,'export MPI_LAUNCH_TIMEOUT=520\n'); - fprintf(fid,'export MPI_GROUP_MAX=64\n\n'); - fprintf(fid,'export ISSM_DIR="%s"\n',cluster.srcpath); %FIXME - fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n'); %FIXME - fprintf(fid,'export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$ISSM_DIR/externalpackages/petsc/install/lib"\n'); - fprintf(fid,'cd %s/%s/\n\n',cluster.executionpath,dirname); - if ~isvalgrind, - fprintf(fid,'/u/scicon/tools/bin/several_tries mpiexec -np %i /u/scicon/tools/bin/mbind.x -cs -n%i %s/%s %s %s %s\n',cluster.np,cluster.cpuspernode,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname); - else - fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname); - end - if ~io_gather, %concatenate the output files: - fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname); - end - fclose(fid); + %write queuing script + fid=fopen([modelname '.queue'],'w'); + 
fprintf(fid,'#PBS -S /bin/bash\n'); +% fprintf(fid,'#PBS -N %s\n',modelname); + fprintf(fid,'#PBS -l select=%i:ncpus=%i:model=%s\n',cluster.numnodes,cluster.cpuspernode,cluster.processor); + fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds. + fprintf(fid,'#PBS -q %s \n',cluster.queue); + fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist); + fprintf(fid,'#PBS -m e\n'); + fprintf(fid,'#PBS -o %s.outlog \n',[cluster.executionpath '/' dirname '/' modelname]); + fprintf(fid,'#PBS -e %s.errlog \n\n',[cluster.executionpath '/' dirname '/' modelname]); + fprintf(fid,'. /usr/share/modules/init/bash\n\n'); + for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end + fprintf(fid,'export PATH="$PATH:."\n\n'); + fprintf(fid,'export MPI_LAUNCH_TIMEOUT=520\n'); + fprintf(fid,'export MPI_GROUP_MAX=64\n\n'); + fprintf(fid,'export ISSM_DIR="%s"\n',cluster.srcpath); %FIXME + fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n'); %FIXME + fprintf(fid,'export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$ISSM_DIR/externalpackages/petsc/install/lib"\n'); + fprintf(fid,'cd %s/%s/\n\n',cluster.executionpath,dirname); + if ~isvalgrind, + fprintf(fid,'/u/scicon/tools/bin/several_tries mpiexec -np %i /u/scicon/tools/bin/mbind.x -cs -n%i %s/%s %s %s %s\n',cluster.np,cluster.cpuspernode,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname); + else + fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname); + end + if ~io_gather, %concatenate the output files: + fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname); + end + fclose(fid); - %in interactive mode, create a run file, and errlog and outlog file - if cluster.interactive, - fid=fopen([modelname '.run'],'w'); - if cluster.interactive==10, - fprintf(fid,'module unload mpi-mvapich2/1.4.1/gcc\n'); - fprintf(fid,'mpiexec -np %i %s/%s %s 
%s %s\n',cluster.np,cluster.codepath,executable,solution,[pwd() '/run'],modelname); - else - if ~isvalgrind, - fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/Interactive' num2str(cluster.interactive)],modelname); - else - fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/Interactive' num2str(cluster.interactive)],modelname); - end - end - if ~io_gather, %concatenate the output files: - fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname); - end - fclose(fid); - fid=fopen([modelname '.errlog'],'w'); - fclose(fid); - fid=fopen([modelname '.outlog'],'w'); - fclose(fid); - end - end %}}} - function BuildQueueScriptMultipleModels(cluster,dirname,modelname,solution,dirnames,modelnames,nps) % {{{ + %in interactive mode, create a run file, and errlog and outlog file + if cluster.interactive, + fid=fopen([modelname '.run'],'w'); + if cluster.interactive==10, + fprintf(fid,'module unload mpi-mvapich2/1.4.1/gcc\n'); + fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[pwd() '/run'],modelname); + else + if ~isvalgrind, + fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/Interactive' num2str(cluster.interactive)],modelname); + else + fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/Interactive' num2str(cluster.interactive)],modelname); + end + end + if ~io_gather, %concatenate the output files: + fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname); + end + fclose(fid); + fid=fopen([modelname '.errlog'],'w'); + fclose(fid); + fid=fopen([modelname '.outlog'],'w'); + fclose(fid); + end + end %}}} + function 
BuildQueueScriptMultipleModels(cluster,dirname,modelname,solution,dirnames,modelnames,nps) % {{{ - %some checks: - if isempty(modelname), error('BuildQueueScriptMultipleModels error message: need a non empty model name!');end + %some checks: + if isempty(modelname), error('BuildQueueScriptMultipleModels error message: need a non empty model name!');end - %what is the executable being called? - executable='issm_slr.exe'; + %what is the executable being called? + executable='issm_slr.exe'; - if ispc(), error('BuildQueueScriptMultipleModels not support yet on windows machines');end; + if ispc(), error('BuildQueueScriptMultipleModels not support yet on windows machines');end; - %write queuing script - fid=fopen([modelname '.queue'],'w'); + %write queuing script + fid=fopen([modelname '.queue'],'w'); - fprintf(fid,'#PBS -S /bin/bash\n'); - fprintf(fid,'#PBS -N %s\n',modelname); - fprintf(fid,'#PBS -l select=%i:ncpus=%i:model=%s\n',cluster.numnodes,cluster.cpuspernode,cluster.processor); - fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds. - fprintf(fid,'#PBS -q %s \n',cluster.queue); - fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist); - fprintf(fid,'#PBS -m e\n'); - fprintf(fid,'#PBS -o %s.outlog \n',[cluster.executionpath '/' dirname '/' modelname]); - fprintf(fid,'#PBS -e %s.errlog \n\n',[cluster.executionpath '/' dirname '/' modelname]); - fprintf(fid,'. 
/usr/share/modules/init/bash\n\n'); - for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end - fprintf(fid,'export PATH="$PATH:."\n\n'); - fprintf(fid,'export MPI_GROUP_MAX=64\n\n'); - fprintf(fid,'export ISSM_DIR="%s/../"\n',cluster.codepath); %FIXME - fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n'); %FIXME - fprintf(fid,'cd %s/%s/\n\n',cluster.executionpath,dirname); + fprintf(fid,'#PBS -S /bin/bash\n'); + fprintf(fid,'#PBS -N %s\n',modelname); + fprintf(fid,'#PBS -l select=%i:ncpus=%i:model=%s\n',cluster.numnodes,cluster.cpuspernode,cluster.processor); + fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds. + fprintf(fid,'#PBS -q %s \n',cluster.queue); + fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist); + fprintf(fid,'#PBS -m e\n'); + fprintf(fid,'#PBS -o %s.outlog \n',[cluster.executionpath '/' dirname '/' modelname]); + fprintf(fid,'#PBS -e %s.errlog \n\n',[cluster.executionpath '/' dirname '/' modelname]); + fprintf(fid,'. 
/usr/share/modules/init/bash\n\n'); + for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end + fprintf(fid,'export PATH="$PATH:."\n\n'); + fprintf(fid,'export MPI_GROUP_MAX=64\n\n'); + fprintf(fid,'export ISSM_DIR="%s/../"\n',cluster.codepath); %FIXME + fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n'); %FIXME + fprintf(fid,'cd %s/%s/\n\n',cluster.executionpath,dirname); - %number of cpus: - mpistring=sprintf('mpiexec -np %i ',cluster.numnodes*cluster.cpuspernode); + %number of cpus: + mpistring=sprintf('mpiexec -np %i ',cluster.numnodes*cluster.cpuspernode); - %executable: - mpistring=[mpistring sprintf('%s/%s ',cluster.codepath,executable)]; + %executable: + mpistring=[mpistring sprintf('%s/%s ',cluster.codepath,executable)]; - %solution name: - mpistring=[mpistring sprintf('%s ',solution)]; + %solution name: + mpistring=[mpistring sprintf('%s ',solution)]; - %execution directory and model name: - mpistring=[mpistring sprintf('%s/%s %s',cluster.executionpath,dirname,modelname)]; + %execution directory and model name: + mpistring=[mpistring sprintf('%s/%s %s',cluster.executionpath,dirname,modelname)]; - %inform main executable of how many icecaps, glaciers and earth models are being run: - mpistring=[mpistring sprintf(' %i ',length(dirnames))]; + %inform main executable of how many icecaps, glaciers and earth models are being run: + mpistring=[mpistring sprintf(' %i ',length(dirnames))]; - %icecaps, glaciers and earth location, names and number of processors associated: - for i=1:length(dirnames), - mpistring=[mpistring sprintf(' %s/%s %s %i ',cluster.executionpath,dirnames{i},modelnames{i},nps{i})]; - end + %icecaps, glaciers and earth location, names and number of processors associated: + for i=1:length(dirnames), + mpistring=[mpistring sprintf(' %s/%s %s %i ',cluster.executionpath,dirnames{i},modelnames{i},nps{i})]; + end - %write this long string to disk: - fprintf(fid,mpistring); - fclose(fid); + %write this long string 
to disk: + fprintf(fid,mpistring); + fclose(fid); - if cluster.interactive, - fid=fopen([modelname '.run'],'w'); + if cluster.interactive, + fid=fopen([modelname '.run'],'w'); - %number of cpus: - mpistring=sprintf('mpiexec -np %i ',cluster.numnodes*cluster.cpuspernode); + %number of cpus: + mpistring=sprintf('mpiexec -np %i ',cluster.numnodes*cluster.cpuspernode); - %executable: - mpistring=[mpistring sprintf('%s/%s ',cluster.codepath,executable)]; + %executable: + mpistring=[mpistring sprintf('%s/%s ',cluster.codepath,executable)]; - %solution name: - mpistring=[mpistring sprintf('%s ',solution)]; + %solution name: + mpistring=[mpistring sprintf('%s ',solution)]; - %execution directory and model name: - mpistring=[mpistring sprintf('%s/%s %s',cluster.executionpath,dirname,modelname)]; + %execution directory and model name: + mpistring=[mpistring sprintf('%s/%s %s',cluster.executionpath,dirname,modelname)]; - %inform main executable of how many icecaps, glaciers and earth models are being run: - mpistring=[mpistring sprintf(' %i ',length(dirnames))]; + %inform main executable of how many icecaps, glaciers and earth models are being run: + mpistring=[mpistring sprintf(' %i ',length(dirnames))]; - %icecaps, glaciers and earth location, names and number of processors associated: - for i=1:length(dirnames), - mpistring=[mpistring sprintf(' %s/Interactive%i %s %i ',cluster.executionpath,cluster.interactive,modelnames{i},nps{i})]; - end + %icecaps, glaciers and earth location, names and number of processors associated: + for i=1:length(dirnames), + mpistring=[mpistring sprintf(' %s/Interactive%i %s %i ',cluster.executionpath,cluster.interactive,modelnames{i},nps{i})]; + end - %write this long string to disk: - fprintf(fid,mpistring); - fclose(fid); + %write this long string to disk: + fprintf(fid,mpistring); + fclose(fid); - fid=fopen([modelname '.errlog'],'w'); - fclose(fid); - fid=fopen([modelname '.outlog'],'w'); - fclose(fid); - end - end - %}}} - function 
BuildKrigingQueueScript(cluster,modelname,solution,io_gather,isvalgrind,isgprof) % {{{ + fid=fopen([modelname '.errlog'],'w'); + fclose(fid); + fid=fopen([modelname '.outlog'],'w'); + fclose(fid); + end + end + %}}} + function BuildKrigingQueueScript(cluster,modelname,solution,io_gather,isvalgrind,isgprof) % {{{ - if(isgprof), disp('gprof not supported by cluster, ignoring...'); end + if(isgprof), disp('gprof not supported by cluster, ignoring...'); end - %write queuing script - fid=fopen([modelname '.queue'],'w'); - fprintf(fid,'#PBS -S /bin/bash\n'); - % fprintf(fid,'#PBS -N %s\n',modelname); - fprintf(fid,'#PBS -l select=%i:ncpus=%i:model=%s\n',cluster.numnodes,cluster.cpuspernode,cluster.processor); - fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds. - fprintf(fid,'#PBS -q %s \n',cluster.queue); - fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist); - fprintf(fid,'#PBS -m e\n'); - fprintf(fid,'#PBS -o %s.outlog \n',modelname); - fprintf(fid,'#PBS -e %s.errlog \n\n',modelname); - fprintf(fid,'. 
/usr/share/modules/init/bash\n\n'); - for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end - fprintf(fid,'export PATH="$PATH:."\n'); - fprintf(fid,'export ISSM_DIR="%s/../"\n',cluster.codepath); %FIXME - fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n'); %FIXME - fprintf(fid,'export MPI_GROUP_MAX=64\n\n'); - fprintf(fid,'cd %s/%s/\n\n',cluster.executionpath,modelname); - fprintf(fid,'mpiexec -np %i %s/kriging.exe %s %s\n',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname); %FIXME - if ~io_gather, %concatenate the output files: - fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname); - end - fclose(fid); + %write queuing script + fid=fopen([modelname '.queue'],'w'); + fprintf(fid,'#PBS -S /bin/bash\n'); + % fprintf(fid,'#PBS -N %s\n',modelname); + fprintf(fid,'#PBS -l select=%i:ncpus=%i:model=%s\n',cluster.numnodes,cluster.cpuspernode,cluster.processor); + fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds. + fprintf(fid,'#PBS -q %s \n',cluster.queue); + fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist); + fprintf(fid,'#PBS -m e\n'); + fprintf(fid,'#PBS -o %s.outlog \n',modelname); + fprintf(fid,'#PBS -e %s.errlog \n\n',modelname); + fprintf(fid,'. 
/usr/share/modules/init/bash\n\n'); + for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end + fprintf(fid,'export PATH="$PATH:."\n'); + fprintf(fid,'export ISSM_DIR="%s/../"\n',cluster.codepath); %FIXME + fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n'); %FIXME + fprintf(fid,'export MPI_GROUP_MAX=64\n\n'); + fprintf(fid,'cd %s/%s/\n\n',cluster.executionpath,modelname); + fprintf(fid,'mpiexec -np %i %s/kriging.exe %s %s\n',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname); %FIXME + if ~io_gather, %concatenate the output files: + fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname); + end + fclose(fid); - %in interactive mode, create a run file, and errlog and outlog file - if cluster.interactive, - fid=fopen([modelname '.run'],'w'); - if ~isvalgrind, - fprintf(fid,'mpiexec -np %i %s/kriging.exe %s %s\n',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname); - else - fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/kriging.exe %s %s\n',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname); - end - if ~io_gather, %concatenate the output files: - fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname); - end - fclose(fid); - fid=fopen([modelname '.errlog'],'w'); - fclose(fid); - fid=fopen([modelname '.outlog'],'w'); - fclose(fid); - end - end %}}} - function BuildOceanQueueScript(np,cluster,modelname) % {{{ + %in interactive mode, create a run file, and errlog and outlog file + if cluster.interactive, + fid=fopen([modelname '.run'],'w'); + if ~isvalgrind, + fprintf(fid,'mpiexec -np %i %s/kriging.exe %s %s\n',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname); + else + fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/kriging.exe %s %s\n',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname); + end + if ~io_gather, %concatenate the output files: + fprintf(fid,'cat %s.outbin.* 
> %s.outbin',modelname,modelname); + end + fclose(fid); + fid=fopen([modelname '.errlog'],'w'); + fclose(fid); + fid=fopen([modelname '.outlog'],'w'); + fclose(fid); + end + end %}}} + function BuildOceanQueueScript(np,cluster,modelname) % {{{ - %write queuing script - fid=fopen([modelname '.queue'],'w'); - fprintf(fid,'#PBS -S /bin/bash\n'); - fprintf(fid,'#PBS -l select=1:ncpus=%i:model=%s\n',np,cluster.processor); - fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds. - fprintf(fid,'#PBS -q %s \n',cluster.queue); - fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist); - fprintf(fid,'#PBS -m e\n'); - fprintf(fid,'#PBS -o %s.outlog \n',modelname); - fprintf(fid,'#PBS -e %s.errlog \n\n',modelname); - fprintf(fid,'. /usr/share/modules/init/bash\n\n'); - %for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end %FIXME: should use this! - fprintf(fid,'module load comp-intel/2016.2.181\n'); - fprintf(fid,'module load netcdf/4.4.1.1_mpt\n'); - fprintf(fid,'module load mpi-sgi/mpt.2.15r20\n'); - fprintf(fid,'export PATH="$PATH:."\n'); - fprintf(fid,'export MPI_GROUP_MAX=64\n\n'); - fprintf(fid,['cd ' pwd() ' \n\n']); - fprintf(fid,'mpiexec -np %i ./mitgcmuv\n',np); - % if ~io_gather, %concatenate the output files: - % fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname); - % end - fclose(fid); + %write queuing script + fid=fopen([modelname '.queue'],'w'); + fprintf(fid,'#PBS -S /bin/bash\n'); + fprintf(fid,'#PBS -l select=1:ncpus=%i:model=%s\n',np,cluster.processor); + fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds. + fprintf(fid,'#PBS -q %s \n',cluster.queue); + fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist); + fprintf(fid,'#PBS -m e\n'); + fprintf(fid,'#PBS -o %s.outlog \n',modelname); + fprintf(fid,'#PBS -e %s.errlog \n\n',modelname); + fprintf(fid,'. 
/usr/share/modules/init/bash\n\n'); + %for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end %FIXME: should use this! + fprintf(fid,'module load comp-intel/2016.2.181\n'); + fprintf(fid,'module load netcdf/4.4.1.1_mpt\n'); + fprintf(fid,'module load mpi-sgi/mpt.2.15r20\n'); + fprintf(fid,'export PATH="$PATH:."\n'); + fprintf(fid,'export MPI_GROUP_MAX=64\n\n'); + fprintf(fid,['cd ' pwd() ' \n\n']); + fprintf(fid,'mpiexec -np %i ./mitgcmuv\n',np); + % if ~io_gather, %concatenate the output files: + % fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname); + % end + fclose(fid); - %in interactive mode, create a run file, and errlog and outlog file - if cluster.interactive, - fid=fopen([modelname '.run'],'w'); - fprintf(fid,'module load mpi-sgi/mpt.2.15r20\n'); - fprintf(fid,['mpiexec -np %i ./mitgcmuv \n'],np); - fprintf(fid,['touch ' modelname '.lock %s\n']); - fclose(fid); - fid=fopen([modelname '.errlog'],'w'); - fclose(fid); - fid=fopen([modelname '.outlog'],'w'); - fclose(fid); - end + %in interactive mode, create a run file, and errlog and outlog file + if cluster.interactive, + fid=fopen([modelname '.run'],'w'); + fprintf(fid,'module load mpi-sgi/mpt.2.15r20\n'); + fprintf(fid,['mpiexec -np %i ./mitgcmuv \n'],np); + fprintf(fid,['touch ' modelname '.lock %s\n']); + fclose(fid); + fid=fopen([modelname '.errlog'],'w'); + fclose(fid); + fid=fopen([modelname '.outlog'],'w'); + fclose(fid); + end - end %}}} - function UploadQueueJob(cluster,modelname,dirname,filelist)% {{{ + end %}}} + function UploadQueueJob(cluster,modelname,dirname,filelist)% {{{ - %compress the files into one zip. - compressstring=['tar -zcf ' dirname '.tar.gz ']; - for i=1:numel(filelist), - compressstring = [compressstring ' ' filelist{i}]; - end - if cluster.interactive, - compressstring = [compressstring ' ' modelname '.run ' modelname '.errlog ' modelname '.outlog ']; - end - system(compressstring); + %compress the files into one zip. 
+ compressstring=['tar -zcf ' dirname '.tar.gz ']; + for i=1:numel(filelist), + compressstring = [compressstring ' ' filelist{i}]; + end + if cluster.interactive, + compressstring = [compressstring ' ' modelname '.run ' modelname '.errlog ' modelname '.outlog ']; + end + system(compressstring); - disp('uploading input file and queueing script'); - if cluster.interactive==10, - directory=[pwd() '/run/']; - elseif cluster.interactive, - directory=[cluster.executionpath '/Interactive' num2str(cluster.interactive)]; - else - directory=cluster.executionpath; - end + disp('uploading input file and queueing script'); + if cluster.interactive==10, + directory=[pwd() '/run/']; + elseif cluster.interactive, + directory=[cluster.executionpath '/Interactive' num2str(cluster.interactive)]; + else + directory=cluster.executionpath; + end - if ~cluster.bbftp, - issmscpout(cluster.name,directory,cluster.login,cluster.port,{[dirname '.tar.gz']}); - else - issmbbftpout(cluster.name,directory,cluster.login,cluster.port,cluster.numstreams,{[dirname '.tar.gz']}); - end + if ~cluster.bbftp, + issmscpout(cluster.name,directory,cluster.login,cluster.port,{[dirname '.tar.gz']}); + else + issmbbftpout(cluster.name,directory,cluster.login,cluster.port,cluster.numstreams,{[dirname '.tar.gz']}); + end - end - %}}} - function LaunchQueueJob(cluster,modelname,dirname,filelist,restart,batch)% {{{ + end + %}}} + function LaunchQueueJob(cluster,modelname,dirname,filelist,restart,batch)% {{{ - %lauch command, to be executed via ssh - if ~cluster.interactive, - if ~isempty(restart) - launchcommand=['cd ' cluster.executionpath ' && cd ' dirname ' && qsub ' modelname '.queue ']; - else - launchcommand=['cd ' cluster.executionpath ' && rm -rf ./' dirname ' && mkdir ' dirname ... 
- ' && cd ' dirname ' && mv ../' dirname '.tar.gz ./ && tar -zxf ' dirname '.tar.gz && qsub ' modelname '.queue ']; - end - else - if ~isempty(restart) - launchcommand=['cd ' cluster.executionpath '/Interactive' num2str(cluster.interactive)]; - else - if cluster.interactive==10, - launchcommand=['cd ' pwd() '/run && tar -zxf ' dirname '.tar.gz']; - else - launchcommand=['cd ' cluster.executionpath '/Interactive' num2str(cluster.interactive) ' && tar -zxf ' dirname '.tar.gz']; - end - end - end + %launch command, to be executed via ssh + if ~cluster.interactive, + if ~isempty(restart) + launchcommand=['cd ' cluster.executionpath ' && cd ' dirname ' && qsub ' modelname '.queue ']; + else + launchcommand=['cd ' cluster.executionpath ' && rm -rf ./' dirname ' && mkdir ' dirname ... + ' && cd ' dirname ' && mv ../' dirname '.tar.gz ./ && tar -zxf ' dirname '.tar.gz && qsub ' modelname '.queue ']; + end + else + if ~isempty(restart) + launchcommand=['cd ' cluster.executionpath '/Interactive' num2str(cluster.interactive)]; + else + if cluster.interactive==10, + launchcommand=['cd ' pwd() '/run && tar -zxf ' dirname '.tar.gz']; + else + launchcommand=['cd ' cluster.executionpath '/Interactive' num2str(cluster.interactive) ' && tar -zxf ' dirname '.tar.gz']; + end + end + end - disp('launching solution sequence on remote cluster'); - issmssh(cluster.name,cluster.login,cluster.port,launchcommand); - end - %}}} - function Download(cluster,dirname,filelist)% {{{ + disp('launching solution sequence on remote cluster'); + issmssh(cluster.name,cluster.login,cluster.port,launchcommand); + end + %}}} + function Download(cluster,dirname,filelist)% {{{ - %copy files from cluster to current directory - if cluster.interactive==10, - directory=[pwd() '/run/']; - elseif ~cluster.interactive, - directory=[cluster.executionpath '/' dirname '/']; - else - directory=[cluster.executionpath '/Interactive' num2str(cluster.interactive) '/']; - end + %copy files from cluster to current directory + 
if cluster.interactive==10, + directory=[pwd() '/run/']; + elseif ~cluster.interactive, + directory=[cluster.executionpath '/' dirname '/']; + else + directory=[cluster.executionpath '/Interactive' num2str(cluster.interactive) '/']; + end - if ~cluster.bbftp, - issmscpin(cluster.name,cluster.login,cluster.port,directory,filelist); - else - issmbbftpin(cluster.name, cluster.login, cluster.port, cluster.numstreams, directory, filelist); - end + if ~cluster.bbftp, + issmscpin(cluster.name,cluster.login,cluster.port,directory,filelist); + else + issmbbftpin(cluster.name, cluster.login, cluster.port, cluster.numstreams, directory, filelist); + end - end %}}} + end %}}} end end Index: ../trunk-jpl/src/m/classes/clusters/pfe.py =================================================================== --- ../trunk-jpl/src/m/classes/clusters/pfe.py (revision 25299) +++ ../trunk-jpl/src/m/classes/clusters/pfe.py (revision 25300) @@ -31,6 +31,7 @@ self.queue = 'long' self.time = 12 * 60 self.processor = 'wes' + self.srcpath = '' self.codepath = '' self.executionpath = '' self.grouplist = 's1010' @@ -60,11 +61,11 @@ s = "%s\n%s" % (s, fielddisplay(self, 'cpuspernode', 'number of nodes per CPUs')) s = "%s\n%s" % (s, fielddisplay(self, 'np', 'number of CPUs')) s = "%s\n%s" % (s, fielddisplay(self, 'port', 'machine access port')) - s = "%s\n%s" % (s, fielddisplay(self, 'codepath', 'code path on the cluster')) - s = "%s\n%s" % (s, fielddisplay(self, 'executionpath', 'execution path on the cluster')) s = "%s\n%s" % (s, fielddisplay(self, 'queue', 'name of the queue')) s = "%s\n%s" % (s, fielddisplay(self, 'time', 'walltime requested')) s = "%s\n%s" % (s, fielddisplay(self, 'processor', 'type of processor')) + s = "%s\n%s" % (s, fielddisplay(self, 'codepath', '$ISSM_DIR on pfe')) + s = "%s\n%s" % (s, fielddisplay(self, 'executionpath', 'directory containing issm.exe on pfe')) s = "%s\n%s" % (s, fielddisplay(self, 'grouplist', 'name of the group')) s = "%s\n%s" % (s, fielddisplay(self, 
'interactive', '')) s = "%s\n%s" % (s, fielddisplay(self, 'bbftp', '')) @@ -112,9 +113,11 @@ else: md = md.checkmessage('unknown processor type, should be ''neh'', ''wes'' or ''har'' or ''ivy''') - #Miscelaneous + #Miscellaneous if not self.login: md = md.checkmessage('login empty') + if not self.srcpath: + md = md.checkmessage('srcpath empty') if not self.codepath: md = md.checkmessage('codepath empty') if not self.executionpath: @@ -152,8 +155,9 @@ fid.write('module load mpi-sgi/mpt.2.11r13\n') fid.write('export PATH="$PATH:."\n\n') fid.write('export MPI_GROUP_MAX=64\n\n') - fid.write('export ISSM_DIR="%s/../ "\n' % self.codepath) + fid.write('export ISSM_DIR="%s"\n' % self.srcpath) fid.write('source $ISSM_DIR/etc/environment.sh\n') + fid.write('export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$ISSM_DIR/externalpackages/petsc/install/lib"\n') fid.write('cd %s/%s/ \n\n' % (self.executionpath, dirname)) fid.write('mpiexec - np %i %s/%s %s %s/%s %s\n' % (self.nprocs(), self.codepath, executable, str(solution), self.executionpath, dirname, modelname)) Index: ../trunk-jpl/src/m/classes/fourierlove.py =================================================================== --- ../trunk-jpl/src/m/classes/fourierlove.py (revision 25299) +++ ../trunk-jpl/src/m/classes/fourierlove.py (revision 25300) @@ -64,9 +64,9 @@ self.frequencies = [0] #Hz self.sh_nmax = 256 # .35 degree, 40 km at the equator. self.sh_nmin = 1 - self.g0 = 10 # m / s^2 - self.r0 = 6378e3 #m - self.mu0 = 1e11 # Pa + self.g0 = 9.81 # m/s^2 + self.r0 = 6371e3 #m + self.mu0 = 1e11 # Pa self.allow_layer_deletion = 1 self.love_kernels = 0 self.forcing_type = 11 Index: ../trunk-jpl/test/Par/GiaIvinsBenchmarksAB.py =================================================================== --- ../trunk-jpl/test/Par/GiaIvinsBenchmarksAB.py (revision 25299) +++ ../trunk-jpl/test/Par/GiaIvinsBenchmarksAB.py (revision 25300) @@ -11,6 +11,7 @@ from verbose import * from SetIceSheetBC import * + rad = 600000. 
nv = md.mesh.numberofvertices if (np.isnan(md.geometry.thickness)): @@ -20,7 +21,7 @@ if (dist <= rad): md.geometry.thickness[i] = 2000.0 else: - md.geometry.thickness[i] = 1.0 # non-zero thickness + md.geometry.thickness[i] = 0 md.geometry.thickness = md.geometry.thickness.reshape(-1, 1) md.geometry.base = np.zeros((md.mesh.numberofvertices, 1)) Index: ../trunk-jpl/test/Par/GiaIvinsBenchmarksCD.py =================================================================== --- ../trunk-jpl/test/Par/GiaIvinsBenchmarksCD.py (revision 25299) +++ ../trunk-jpl/test/Par/GiaIvinsBenchmarksCD.py (revision 25300) @@ -1,5 +1,4 @@ #Geometry specific to Experiments C and D - import inspect import os.path @@ -22,7 +21,7 @@ if (dist <= rad): md.geometry.thickness[i] = 3000.0 else: - md.geometry.thickness[i] = 1.0 # non - zero thickness + md.geometry.thickness[i] = 0 md.geometry.thickness = md.geometry.thickness.reshape(-1, 1) md.geometry.base = np.zeros((md.mesh.numberofvertices, 1)) Index: ../trunk-jpl/test/NightlyRun/test2001.py =================================================================== --- ../trunk-jpl/test/NightlyRun/test2001.py (revision 25299) +++ ../trunk-jpl/test/NightlyRun/test2001.py (revision 25300) @@ -31,11 +31,13 @@ #Define loading history (see test2001.m for the description) md.timestepping.start_time = 2400000 # 2, 400 kyr md.timestepping.final_time = 2500000 # 2, 500 kyr -md.geometry.thickness = np.vstack((np.hstack((md.geometry.thickness * 0.0, 0.0)), - np.hstack((md.geometry.thickness / 2.0, 0.1)), - np.hstack((md.geometry.thickness, 0.2)), - np.hstack((md.geometry.thickness, 1.0)), - np.hstack((md.geometry.thickness, md.timestepping.start_time)))).T +md.geometry.thickness = np.array([ + np.append(md.geometry.thickness * 0.0, 0.0), + np.append(md.geometry.thickness / 2.0, 0.1), + np.append(md.geometry.thickness, 0.2), + np.append(md.geometry.thickness, 1.0), + np.append(md.geometry.thickness, md.timestepping.start_time) + ]).T #Solve for GIA deflection 
md.cluster = generic('name', gethostname(), 'np', 3) Index: ../trunk-jpl/test/NightlyRun/test2051.m =================================================================== --- ../trunk-jpl/test/NightlyRun/test2051.m (revision 25299) +++ ../trunk-jpl/test/NightlyRun/test2051.m (revision 25300) @@ -23,7 +23,7 @@ % find out elements that have zero loads throughout the loading history. pos = find(sum(abs(md.geometry.thickness(1:end-1,:)),2)==0); -md.mask.ice_levelset(pos)=1; % no-ice. +md.mask.ice_levelset(pos)=1; % no ice md.cluster=generic('name',oshostname(),'np',3); md.verbose=verbose('1111111'); @@ -37,8 +37,8 @@ %Test Name: GiaIvinsBenchmarksAB2dA2 %% different evaluation time. {{{ -md.timestepping.start_time=2005100; % after 5 kyr of deglaciation -md.geometry.thickness(end,end) = md.timestepping.start_time; +md.timestepping.start_time=2005100; % after 5 kyr of deglaciation +md.geometry.thickness(end,end) = md.timestepping.start_time; md=solve(md,'Gia'); Index: ../trunk-jpl/test/NightlyRun/test2053.m =================================================================== --- ../trunk-jpl/test/NightlyRun/test2053.m (revision 25299) +++ ../trunk-jpl/test/NightlyRun/test2053.m (revision 25300) @@ -8,7 +8,7 @@ md.gia.cross_section_shape=2; % for elliptical edge % evaluation time (termed start_time) -md.timestepping.start_time=0.3; % for t \approx 0 kyr : to get eleastic response! +md.timestepping.start_time=0.3; % for t \approx 0 kyr : to get elastic response! md.timestepping.final_time=2500000; % 2,500 kyr %% define loading history {{{ @@ -22,7 +22,7 @@ % find out elements that have zero loads throughout the loading history. pos = find(sum(abs(md.geometry.thickness(1:end-1,:)),2)==0); -md.mask.ice_levelset(pos)=1; % no-ice. 
+md.mask.ice_levelset(pos)=1; % no ice md.cluster=generic('name',oshostname(),'np',3); md.verbose=verbose('1111111'); Index: ../trunk-jpl/test/NightlyRun/test2051.py =================================================================== --- ../trunk-jpl/test/NightlyRun/test2051.py (revision 25299) +++ ../trunk-jpl/test/NightlyRun/test2051.py (revision 25300) @@ -1,4 +1,4 @@ -#Test Name: GiaIvinsBenchmarksAB2dA1 +#Test Name: GiaIvinsBenchmarksAB2dA from socket import gethostname import numpy as np @@ -16,23 +16,58 @@ md = parameterize(md, '../Par/GiaIvinsBenchmarksAB.py') # indicate what you want to compute -md.gia.cross_section_shape = 1 # for square-edged x - section +md.gia.cross_section_shape = 1 # for square-edged x-section -# define loading history +# evaluation time (termed start_time) md.timestepping.start_time = 2002100 # after 2 kyr of deglaciation -md.timestepping.final_time = 2500000 # 2, 500 kyr -md.geometry.thickness = np.array([np.append(md.geometry.thickness * 0.0, 0.0), - np.append(md.geometry.thickness, 1000), - np.append(md.geometry.thickness, 2000000), - np.append(md.geometry.thickness * 0.0, 2000100), - np.append(md.geometry.thickness * 0.0, md.timestepping.start_time)]).T +md.timestepping.final_time = 2500000 # 2,500 kyr -# solve for GIA deflection +# define loading history +md.geometry.thickness = np.array([ + np.append(md.geometry.thickness * 0.0, 0.0), + np.append(md.geometry.thickness, 1000), + np.append(md.geometry.thickness, 2000000), + np.append(md.geometry.thickness * 0.0, 2000100), + np.append(md.geometry.thickness * 0.0, md.timestepping.start_time) + ]).T + +# find out the elements that have zero loads throughout the loading history +pos = np.where(np.abs(md.geometry.thickness[0:-2, :].sum(axis=1)) == 0)[0] +md.mask.ice_levelset[pos] = 1 # no ice + md.cluster = generic('name', gethostname(), 'np', 3) md.verbose = verbose('1111111') + +# solve for GIA deflection md = solve(md, 'Gia') -#Fields and tolerances to track changes -field_names = 
['UGia', 'UGiaRate'] -field_tolerances = [1e-13, 1e-13] -field_values = [md.results.GiaSolution.UGia, md.results.GiaSolution.UGiaRate] +# Test Name: GiaIvinsBenchmarksAB2dA1 +U_AB2dA1 = md.results.GiaSolution.UGia +URate_AB2dA1 = md.results.GiaSolution.UGiaRate + +# Test Name: GiaIvinsBenchmarksAB2dA2 +# different evaluation time # {{{ +md.timestepping.start_time = 2005100 # after 5 kyr of deglaciation +md.geometry.thickness[-1, -1] = md.timestepping.start_time + +md = solve(md, 'Gia') + +U_AB2dA2 = md.results.GiaSolution.UGia +URate_AB2dA2 = md.results.GiaSolution.UGiaRate +# }}} + +# Test Name: GiaIvinsBenchmarksAB2dA3 +# different evaluation time # {{{ +md.timestepping.start_time = 2010100 # after 10 kyr of deglaciation +md.geometry.thickness[-1, -1] = md.timestepping.start_time + +md = solve(md, 'Gia') + +U_AB2dA3 = md.results.GiaSolution.UGia +URate_AB2dA3 = md.results.GiaSolution.UGiaRate +# }}} + +# Fields and tolerances to track changes +field_names = ['U_AB2dA1','URate_AB2dA1','U_AB2dA2','URate_AB2dA2','U_AB2dA3','URate_AB2dA3'] +field_tolerances = [1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13] +field_values = [U_AB2dA1, URate_AB2dA1, U_AB2dA2, URate_AB2dA2, U_AB2dA3, URate_AB2dA3] Index: ../trunk-jpl/test/NightlyRun/test2052.py =================================================================== --- ../trunk-jpl/test/NightlyRun/test2052.py (revision 25299) +++ ../trunk-jpl/test/NightlyRun/test2052.py (revision 25300) @@ -1,4 +1,4 @@ -#Test Name: GiaIvinsBenchmarksAB2dC1 +#Test Name: GiaIvinsBenchmarksAB2dC from socket import gethostname import numpy as np @@ -10,28 +10,63 @@ from triangle import * -#Benchmark experiments (Figure A2c Ivins and James, 1999, Geophys. J. Int.) -md = triangle(model(), '../Exp/RoundFrontEISMINT.exp', 200000) +# Benchmark experiments (Figure A2a Ivins and James, 1999, Geophys. J. Int.) +md = triangle(model(), '../Exp/RoundFrontEISMINT.exp', 200000.) 
md = setmask(md, '', '') md = parameterize(md, '../Par/GiaIvinsBenchmarksCD.py') -#indicate what you want to compute +# indicate what you want to compute md.gia.cross_section_shape = 1 # for square-edged x-section -#define loading history -md.timestepping.start_time = 0.3 # for t \approx 0 kyr : to get eleastic response! +# evaluation time (termed start_time) +md.timestepping.start_time = 0.3 # for t \approx 0 kyr : to get elastic response! md.timestepping.final_time = 2500000 # 2,500 kyr -md.geometry.thickness = np.array([np.append(md.geometry.thickness * 0.0, 0.0), - np.append(md.geometry.thickness / 2.0, 0.1), - np.append(md.geometry.thickness, 0.2), - np.append(md.geometry.thickness, md.timestepping.start_time)]).T -#solve for GIA deflection +# define loading history +md.geometry.thickness = np.array([ + np.append(md.geometry.thickness * 0.0, 0.0), + np.append(md.geometry.thickness / 2.0, 0.1), + np.append(md.geometry.thickness, 0.2), + np.append(md.geometry.thickness, md.timestepping.start_time) + ]).T + +# find out elements that have zero loads throughout the loading history +pos = np.where(np.abs(md.geometry.thickness[0:-2, :].sum(axis=1)) == 0)[0] +md.mask.ice_levelset[pos] = 1 # no ice + md.cluster = generic('name', gethostname(), 'np', 3) md.verbose = verbose('1111111') + +# solve for GIA deflection md = solve(md, 'Gia') -#Fields and tolerances to track changes -field_names = ['UGia', 'UGiaRate'] -field_tolerances = [1e-13, 1e-13] -field_values = [md.results.GiaSolution.UGia, md.results.GiaSolution.UGiaRate] +# Test Name: GiaIvinsBenchmarksAB2dC1 +U_AB2dC1 = md.results.GiaSolution.UGia +URate_AB2dC1 = md.results.GiaSolution.UGiaRate + +# Test Name: GiaIvinsBenchmarksAB2dC2 +# different evaluation time # {{{ +md.timestepping.start_time = 1000.3 # for t \approx 1 kyr +md.geometry.thickness[-1, -1] = md.timestepping.start_time + +md = solve(md, 'Gia') + +U_AB2dC2 = md.results.GiaSolution.UGia +URate_AB2dC2 = md.results.GiaSolution.UGiaRate +# }}} + +# Test 
Name: GiaIvinsBenchmarksAB2dC3 +# different evaluation time # {{{ +md.timestepping.start_time = 2400000 # for t \approx \infty +md.geometry.thickness[-1, -1] = md.timestepping.start_time + +md = solve(md, 'Gia') + +U_AB2dC3 = md.results.GiaSolution.UGia +URate_AB2dC3 = md.results.GiaSolution.UGiaRate +# }}} + +# Fields and tolerances to track changes +field_names = ['U_AB2dC1', 'URate_AB2dC1', 'U_AB2dC2', 'URate_AB2dC2', 'U_AB2dC3', 'URate_AB2dC3'] +field_tolerances = [1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13] +field_values = [U_AB2dC1, URate_AB2dC1, U_AB2dC2, URate_AB2dC2, U_AB2dC3, URate_AB2dC3] Index: ../trunk-jpl/test/NightlyRun/test2053.py =================================================================== --- ../trunk-jpl/test/NightlyRun/test2053.py (revision 25299) +++ ../trunk-jpl/test/NightlyRun/test2053.py (revision 25300) @@ -1,4 +1,4 @@ -#Test Name: GiaIvinsBenchmarksAB2dD1 +#Test Name: GiaIvinsBenchmarksAB2dD from socket import gethostname import numpy as np @@ -10,28 +10,64 @@ from triangle import * -#Benchmark experiments (Figure A2c Ivins and James, 1999, Geophys. J. Int.) +# Benchmark experiments (Figure A2a Ivins and James, 1999, Geophys. J. Int.) md = triangle(model(), '../Exp/RoundFrontEISMINT.exp', 200000) md = setmask(md, '', '') md = parameterize(md, '../Par/GiaIvinsBenchmarksCD.py') -#indicate what you want to compute -md.gia.cross_section_shape = 2 # for square-edged x - section +# indicate what you want to compute +md.gia.cross_section_shape = 2 # for elliptical edge -#define loading history -md.timestepping.start_time = 0.3 # for t \approx 0 kyr : to get eleastic response! 
-md.timestepping.final_time = 2500000 # 2, 500 kyr -md.geometry.thickness = np.array([np.append(md.geometry.thickness * 0.0, 0.0), - np.append(md.geometry.thickness / 2.0, 0.1), - np.append(md.geometry.thickness, 0.2), - np.append(md.geometry.thickness, md.timestepping.start_time)]).T +# evaluation time (termed start_time) +md.timestepping.start_time = 0.3 # for t \approx 0 kyr : to get elastic response! +md.timestepping.final_time = 2500000 # 2,500 kyr -#solve for GIA deflection +# define loading history +md.geometry.thickness = np.array([ + np.append(md.geometry.thickness * 0.0, 0.0), + np.append(md.geometry.thickness / 2.0, 0.1), + np.append(md.geometry.thickness, 0.2), + np.append(md.geometry.thickness, md.timestepping.start_time) + ]).T + +# find out elements that have zero loads throughout the loading history +pos = np.where(np.abs(md.geometry.thickness[0:-2, :].sum(axis=1)) == 0)[0] +md.mask.ice_levelset[pos] = 1 # no ice + md.cluster = generic('name', gethostname(), 'np', 3) md.verbose = verbose('1111111') + +# solve for GIA deflection md = solve(md, 'Gia') -#Fields and tolerances to track changes -field_names = ['UGia', 'UGiaRate'] -field_tolerances = [1e-13, 1e-13] -field_values = [md.results.GiaSolution.UGia, md.results.GiaSolution.UGiaRate] +# Test Name: GiaIvinsBenchmarksAB2dD1 +U_AB2dD1 = md.results.GiaSolution.UGia +URate_AB2dD1 = md.results.GiaSolution.UGiaRate + +# Test Name: GiaIvinsBenchmarksAB2dD2 +# different evaluation time # {{{ +md.timestepping.start_time = 1000.3 # for t \approx 1 kyr +md.geometry.thickness[-1, -1] = md.timestepping.start_time + +md = solve(md, 'Gia') + +U_AB2dD2 = md.results.GiaSolution.UGia +URate_AB2dD2 = md.results.GiaSolution.UGiaRate +# }}} + +# Test Name: GiaIvinsBenchmarksAB2dD3 +# different evaluation time # {{{ +md.timestepping.start_time = 2400000 # for t \approx \infty +md.geometry.thickness[-1, -1] = md.timestepping.start_time + +md = solve(md, 'Gia') + +U_AB2dD3 = md.results.GiaSolution.UGia +URate_AB2dD3 = 
md.results.GiaSolution.UGiaRate +# }}} + +# Fields and tolerances to track changes +field_names = ['U_AB2dD1', 'URate_AB2dD1', 'U_AB2dD2', 'URate_AB2dD2', 'U_AB2dD3', 'URate_AB2dD3'] +field_tolerances = [1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13] +field_values = [U_AB2dD1, URate_AB2dD1, U_AB2dD2, URate_AB2dD2, U_AB2dD3, URate_AB2dD3] + Index: ../trunk-jpl/src/m/solve/WriteData.py =================================================================== --- ../trunk-jpl/src/m/solve/WriteData.py (revision 25299) +++ ../trunk-jpl/src/m/solve/WriteData.py (revision 25300) @@ -1,3 +1,4 @@ +from copy import deepcopy from struct import pack, error import numpy as np import pairoptions @@ -38,6 +39,10 @@ # data = full(data) # end + # Always make a copy of the data so that we do not accidentally overwrite + # any model fields. + data = deepcopy(data) + #Scale data if necesarry if options.exist('scale'): data = np.array(data) @@ -147,7 +152,6 @@ elif datatype == 'DoubleMat': # {{{ - if isinstance(data, (bool, int, float)): data = np.array([data]) elif isinstance(data, (list, tuple)): @@ -170,7 +174,7 @@ try: fid.write(pack('q', recordlength)) except error as Err: - raise ValueError('Field {} can not be marshaled, {}, with "number" the lenght of the record.'.format(name, Err)) + raise ValueError('Field {} can not be marshaled, {}, with "number" the length of the record.'.format(name, Err)) #write data code and matrix type: fid.write(pack('i', FormatToCode(datatype))) Index: ../trunk-jpl/src/m/solve/parseresultsfromdisk.py =================================================================== --- ../trunk-jpl/src/m/solve/parseresultsfromdisk.py (revision 25299) +++ ../trunk-jpl/src/m/solve/parseresultsfromdisk.py (revision 25300) @@ -17,7 +17,7 @@ try: fid = open(filename, 'rb') except IOError as e: - raise IOError("loadresultsfromdisk error message: could not open '{}' for binary reading.".format(filename)) + raise IOError("parseresultsfromdisk error message: could not open {} 
for binary reading".format(filename)) #initialize results: saveres = [] @@ -242,16 +242,39 @@ nlayer = md.materials.numlayers degmax = md.love.sh_nmax nfreq = md.love.nfreq - #for numpy 1.8 + only - #temp_field = np.full((degmax + 1, nfreq, nlayer + 1, 6), 0.0) + r0 = md.love.r0 + g0 = md.love.g0 + mu0 = md.love.mu0 + rr = md.materials.radius + rho = md.materials.density + rho_avg = (rho * np.diff(np.power(rr, 3), n=1, axis=0)) / np.diff(np.power(rr, 3).sum()).sum() temp_field = np.empty((degmax + 1, nfreq, nlayer + 1, 6)) temp_field.fill(0.0) for ii in range(degmax + 1): for jj in range(nfreq): for kk in range(nlayer + 1): - ll = ii * (nlayer + 1) * 6 + (kk * 6 + 1) - for mm in range(6): - temp_field[ii, jj, kk, mm] = field[ll + mm - 1, jj] + if kk < nlayer + 1: + ll = ii * (nlayer + 1) * 6 + (kk * 6 + 1) + 3 + temp_field[ii, jj, kk, 0] = field[ll + (1 - 1), jj] * r0 # mm = 4 + temp_field[ii, jj, kk, 1] = field[ll + (2 - 1), jj] * mu0 # mm = 5 + temp_field[ii, jj, kk, 2] = field[ll + (3 - 1), jj] * r0 # mm = 6 + temp_field[ii, jj, kk, 3] = field[ll + (4 - 1), jj] * mu0 # mm = 1 + temp_field[ii, jj, kk, 4] = field[ll + (5 - 1), jj] * r0 * g0 # mm = 2 + temp_field[ii, jj, kk, 5] = field[ll + (6 - 1), jj] * g0 # mm = 3 + print(temp_field) + else: # surface + ll = ii * (nlayer + 1) * 6 - 2 + temp_field[ii, jj, kk, 0] = field[ll + (1 - 1), jj] * r0 + temp_field[ii, jj, kk, 2] = field[ll + (2 - 1), jj] * r0 + temp_field[ii, jj, kk, 4] = field[ll + (3 - 1), jj] * r0 * g0 + # surface BC + temp_field[ii, jj, kk, 3] = 0 + if md.love.forcing_type == 9: + temp_field[ii, jj, kk, 1] = 0 + temp_field[ii, jj, kk, 5] = (2 * ii - 1) / r0 - ii * field[ll + (3 - 1), jj] * g0 + elif md.love.forcing_type == 11: + temp_field[ii, jj, kk, 1] = -(2 * (ii - 1) + 1) * rho_avg / 3 + temp_field[ii, jj, kk, 5] = (2 * ii - 1) / r0 - ii * field[ll + (3 - 1), jj] * g0 field = temp_field if time != -9999: Index: ../trunk-jpl/src/m/solve/parseresultsfromdisk.m 
=================================================================== --- ../trunk-jpl/src/m/solve/parseresultsfromdisk.m (revision 25299) +++ ../trunk-jpl/src/m/solve/parseresultsfromdisk.m (revision 25300) @@ -270,7 +270,6 @@ end result.step=step; result.field=field; - end % }}} function result=ReadDataDimensions(fid) % {{{ Index: ../trunk-jpl/src/m/solve/marshall.py =================================================================== --- ../trunk-jpl/src/m/solve/marshall.py (revision 25299) +++ ../trunk-jpl/src/m/solve/marshall.py (revision 25300) @@ -5,11 +5,11 @@ ''' MARSHALL - outputs a compatible binary file from @model md, for certain solution type. - The routine creates a compatible binary file from @model md - This binary file will be used for parallel runs in JPL - package + The routine creates a compatible binary file from @model md + This binary file will be used for parallel runs in JPL-package - Usage: - marshall(md) + Usage: + marshall(md) ''' if md.verbose.solution: print("marshalling file {}.bin".format(md.miscellaneous.name)) Index: ../trunk-jpl/src/m/classes/geometry.m =================================================================== --- ../trunk-jpl/src/m/classes/geometry.m (revision 25299) +++ ../trunk-jpl/src/m/classes/geometry.m (revision 25300) @@ -89,8 +89,11 @@ end % }}} function marshall(self,prefix,md,fid) % {{{ + disp(md.geometry.thickness) WriteData(fid,prefix,'object',self,'fieldname','surface','format','DoubleMat','mattype',1); WriteData(fid,prefix,'object',self,'fieldname','thickness','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts); + disp(md.geometry.thickness) + pause WriteData(fid,prefix,'object',self,'fieldname','base','format','DoubleMat','mattype',1); WriteData(fid,prefix,'object',self,'fieldname','bed','format','DoubleMat','mattype',1); WriteData(fid,prefix,'object',self,'fieldname','hydrostatic_ratio','format','DoubleMat','mattype',1); Index: 
../trunk-jpl/test/NightlyRun/test2052.m =================================================================== --- ../trunk-jpl/test/NightlyRun/test2052.m (revision 25299) +++ ../trunk-jpl/test/NightlyRun/test2052.m (revision 25300) @@ -8,7 +8,7 @@ md.gia.cross_section_shape=1; % for square-edged x-section % evaluation time (termed start_time) -md.timestepping.start_time=0.3; % for t \approx 0 kyr : to get eleastic response! +md.timestepping.start_time=0.3; % for t \approx 0 kyr: to get elastic response! md.timestepping.final_time=2500000; % 2,500 kyr %% define loading history {{{ @@ -22,7 +22,7 @@ % find out elements that have zero loads throughout the loading history. pos = find(sum(abs(md.geometry.thickness(1:end-1,:)),2)==0); -md.mask.ice_levelset(pos)=1; % no-ice. +md.mask.ice_levelset(pos)=1; % no ice md.cluster=generic('name',oshostname(),'np',3); md.verbose=verbose('1111111'); @@ -30,9 +30,11 @@ %% solve for GIA deflection md=solve(md,'Gia'); +pause + %Test Name: GiaIvinsBenchmarksAB2dC1 U_AB2dC1 = md.results.GiaSolution.UGia; -URate_AB2dC1 = md.results.GiaSolution.UGiaRate; +URate_AB2dC1 = md.results.GiaSolution.UGiaRate; %Test Name: GiaIvinsBenchmarksAB2dC2 %% different evaluation time. {{{