source:
issm/oecreview/Archive/24684-25833/ISSM-25299-25300.diff@27235
      
      | Last change on this file since 27235 was 25834, checked in by , 5 years ago | |
|---|---|
| File size: 69.2 KB | |
- 
      
../trunk-jpl/src/m/solve/loadresultfromdisk.py
import struct

import numpy as np


def loadresultfromdisk(filename, step, name, *args):  # {{{
    """
    LOADRESULTFROMDISK - load specific result of solution sequence from disk
    file "filename"

    Scans the binary results file record by record until it finds a field
    whose name matches `name` and whose step matches `step`, then decodes it
    according to its datatype tag:
        1 - vector of doubles
        2 - string
        3 - M x N matrix of doubles
        4 - M x N matrix of integers

    Usage:
        variable, fpos = loadresultfromdisk(filename, step, name)
        variable, fpos = loadresultfromdisk(filename, step, name, fpos)

    The optional extra argument is a byte offset at which to resume the scan
    (mirrors varargin{1} in src/m/solve/loadresultfromdisk.m). The returned
    fpos is the offset of the record that was found, so a caller can pass it
    back in to re-read the same record cheaply.

    Raises:
        IOError: if the file cannot be opened, or the requested result is
            not present in the file.
        TypeError: if a record carries an unknown datatype tag.
    """
    # Open file
    # NOTE: removed leftover debug statements (print + exit()) that made this
    # function unconditionally terminate the interpreter.
    try:
        fid = open(filename, 'rb')
    except IOError:
        raise IOError("loadresultsfromdisk error message: could not open {} for binary reading".format(filename))

    intsize = struct.calcsize('i')
    doublesize = struct.calcsize('d')

    try:
        # *args holds only the arguments beyond (filename, step, name), so a
        # single saved file position arrives as len(args) == 1 (the original
        # test 'len(args) == 4' could never be true for the documented usage,
        # silently disabling the resume-from-offset feature).
        if len(args) == 1:
            # Put the pointer on the right position in the file
            fpos = args[0]
            fid.seek(fpos)

        while True:
            # read field header; remember where this record starts
            fpos = fid.tell()  # was 'tell(fid)', a NameError
            header = fid.read(intsize)
            if len(header) < intsize:
                # clean end of file: the requested result is not in this file
                raise IOError("loadresultfromdisk error message: could not find result '{}' at step {} in {}".format(name, step, filename))
            length = struct.unpack('i', header)[0]

            # field name is stored with a trailing NUL byte; strip it
            fieldname = struct.unpack('{}s'.format(length), fid.read(length))[0][:-1]
            fieldname = fieldname.decode()  # strings are binaries when stored so need to be converted back
            rtime = struct.unpack('d', fid.read(doublesize))[0]
            rstep = struct.unpack('i', fid.read(intsize))[0]

            if (rstep == step) and (fieldname == name):
                # ok, go read the result really
                datatype = struct.unpack('i', fid.read(intsize))[0]
                M = struct.unpack('i', fid.read(intsize))[0]
                if datatype == 1:
                    field = np.array(struct.unpack('{}d'.format(M), fid.read(M * doublesize)), dtype=float)
                elif datatype == 2:
                    field = struct.unpack('{}s'.format(M), fid.read(M))[0][:-1]
                    field = field.decode()
                elif datatype == 3:
                    N = struct.unpack('i', fid.read(intsize))[0]
                    field = np.zeros(shape=(M, N), dtype=float)
                    for i in range(M):
                        field[i, :] = struct.unpack('{}d'.format(N), fid.read(N * doublesize))
                elif datatype == 4:
                    N = struct.unpack('i', fid.read(intsize))[0]
                    field = np.zeros(shape=(M, N), dtype=int)
                    for i in range(M):
                        field[i, :] = struct.unpack('{}i'.format(N), fid.read(N * intsize))
                else:
                    raise TypeError("cannot read data of type {}".format(datatype))

                variable = field
                break
            else:
                # just skim to next result (seek past the payload without
                # decoding it; sizes mirror the write format: 8-byte doubles,
                # 4-byte ints, raw bytes for strings)
                datatype = struct.unpack('i', fid.read(intsize))[0]
                M = struct.unpack('i', fid.read(intsize))[0]
                if datatype == 1:
                    fid.seek(M * 8, 1)
                elif datatype == 2:
                    fid.seek(M, 1)
                elif datatype == 3:
                    N = struct.unpack('i', fid.read(intsize))[0]
                    fid.seek(N * M * 8, 1)
                elif datatype == 4:
                    N = struct.unpack('i', fid.read(intsize))[0]
                    fid.seek(N * M * 4, 1)
                else:
                    raise TypeError("cannot read data of type {}".format(datatype))
    finally:
        # close even when a decode error is raised mid-scan (the original
        # leaked the handle on any exception)
        fid.close()

    return (variable, fpos)
    # }}}
      
../trunk-jpl/src/m/solve/loadresultfromdisk.m
1 1 function [variable fpos]=loadresultfromdisk(filename,step,name,varargin) 2 %LOADRESULTFROMDISK - load specific result of solution sequence from disk file "filename" 2 %LOADRESULTFROMDISK - load specific result of solution sequence from disk file "filename" 3 3 % 4 4 % Usage: 5 5 % variable=loadresultfromdisk(filename,step,name); … … 11 11 end 12 12 13 13 if nargin==4, 14 %Put the pointer on the right position in the file: 14 15 fpos=varargin{1}; 15 16 fseek(fid,fpos,'bof'); 16 17 end 17 18 18 %Put the pointer on the right position in the file:19 19 while 1, 20 20 21 21 %read field  - 
      
../trunk-jpl/src/m/solve/loadresultsfromcluster.py
1 1 import os 2 import platform 2 3 import socket 3 import platform 4 5 from helpers import * 4 6 from loadresultsfromdisk import loadresultsfromdisk 5 from helpers import *6 7 7 8 8 9 def loadresultsfromcluster(md, runtimename=False):  - 
      
../trunk-jpl/src/m/solve/solve.py
136 136 filelist = [modelname + '.bin ', modelname + '.toolkits ', modelname + '.queue '] 137 137 if md.qmu.isdakota: 138 138 filelist.append(modelname + '.qmu.in') 139 139 140 140 if not restart: 141 141 cluster.UploadQueueJob(md.miscellaneous.name, md.private.runtimename, filelist) 142 142 … … 153 153 if md.verbose.solution: 154 154 print('loading results from cluster') 155 155 md = loadresultsfromcluster(md) 156 157 156 return md  - 
      
../trunk-jpl/src/m/classes/clusters/pfe.m
6 6 % cluster=pfe('np',3,'login','username'); 7 7 8 8 classdef pfe 9 properties (SetAccess=public)10 % {{{11 name = 'pfe'12 login = '';13 modules = {'comp-intel/2016.2.181' 'mpi-sgi/mpt'};14 numnodes = 20;15 cpuspernode = 8;16 port = 1025;17 queue = 'long';18 time = 12*60;19 processor = 'ivy';20 srcpath = '';21 codepath = '';22 executionpath = '';23 grouplist = 's1690';24 interactive = 0;25 bbftp = 0;26 numstreams = 8;27 hyperthreading = 0;28 end29 %}}}30 methods31 function cluster=pfe(varargin) % {{{9 properties (SetAccess=public) 10 % {{{ 11 name = 'pfe' 12 login = ''; 13 modules = {'comp-intel/2016.2.181' 'mpi-sgi/mpt'}; 14 numnodes = 20; 15 cpuspernode = 8; 16 port = 1025; 17 queue = 'long'; 18 time = 12*60; 19 processor = 'ivy'; 20 srcpath = ''; 21 codepath = ''; 22 executionpath = ''; 23 grouplist = 's1690'; 24 interactive = 0; 25 bbftp = 0; 26 numstreams = 8; 27 hyperthreading = 0; 28 end 29 %}}} 30 methods 31 function cluster=pfe(varargin) % {{{ 32 32 33 %initialize cluster using default settings if provided34 if (exist('pfe_settings')==2), pfe_settings; end33 %initialize cluster using default settings if provided 34 if (exist('pfe_settings')==2), pfe_settings; end 35 35 36 %use provided options to change fields37 cluster=AssignObjectFields(pairoptions(varargin{:}),cluster);38 end39 %}}}40 function disp(cluster) % {{{41 % display the object42 disp(sprintf('class ''%s'' object ''%s'' = ',class(cluster),inputname(1)));43 disp(sprintf(' name: %s',cluster.name));44 disp(sprintf(' login: %s',cluster.login));45 modules=''; for i=1:length(cluster.modules), modules=[modules cluster.modules{i} ',']; end; modules=modules(1:end-1);46 disp(sprintf(' modules: %s',modules));47 disp(sprintf(' port: %i',cluster.port));48 disp(sprintf(' numnodes: %i',cluster.numnodes));49 disp(sprintf(' cpuspernode: %i',cluster.cpuspernode));50 disp(sprintf(' np: %i',cluster.cpuspernode*cluster.numnodes));51 disp(sprintf(' queue: %s',cluster.queue));52 disp(sprintf(' time: 
%i',cluster.time));53 disp(sprintf(' processor: %s',cluster.processor));54 disp(sprintf(' codepath: %s ($ISSM_DIR on pfe)',cluster.codepath));55 disp(sprintf(' executionpath: %s (directory containing issm.exe on pfe)',cluster.executionpath));56 disp(sprintf(' grouplist: %s',cluster.grouplist));57 disp(sprintf(' interactive: %i',cluster.interactive));58 disp(sprintf(' hyperthreading: %i',cluster.hyperthreading));59 end60 %}}}61 function numprocs=np(cluster) % {{{62 %compute number of processors63 numprocs=cluster.numnodes*cluster.cpuspernode;64 end65 %}}}66 function md = checkconsistency(cluster,md,solution,analyses) % {{{36 %use provided options to change fields 37 cluster=AssignObjectFields(pairoptions(varargin{:}),cluster); 38 end 39 %}}} 40 function disp(cluster) % {{{ 41 % display the object 42 disp(sprintf('class ''%s'' object ''%s'' = ',class(cluster),inputname(1))); 43 disp(sprintf(' name: %s',cluster.name)); 44 disp(sprintf(' login: %s',cluster.login)); 45 modules=''; for i=1:length(cluster.modules), modules=[modules cluster.modules{i} ',']; end; modules=modules(1:end-1); 46 disp(sprintf(' modules: %s',modules)); 47 disp(sprintf(' port: %i',cluster.port)); 48 disp(sprintf(' numnodes: %i',cluster.numnodes)); 49 disp(sprintf(' cpuspernode: %i',cluster.cpuspernode)); 50 disp(sprintf(' np: %i',cluster.cpuspernode*cluster.numnodes)); 51 disp(sprintf(' queue: %s',cluster.queue)); 52 disp(sprintf(' time: %i',cluster.time)); 53 disp(sprintf(' processor: %s',cluster.processor)); 54 disp(sprintf(' codepath: %s ($ISSM_DIR on pfe)',cluster.codepath)); 55 disp(sprintf(' executionpath: %s (directory containing issm.exe on pfe)',cluster.executionpath)); 56 disp(sprintf(' grouplist: %s',cluster.grouplist)); 57 disp(sprintf(' interactive: %i',cluster.interactive)); 58 disp(sprintf(' hyperthreading: %i',cluster.hyperthreading)); 59 end 60 %}}} 61 function numprocs=np(cluster) % {{{ 62 %compute number of processors 63 numprocs=cluster.numnodes*cluster.cpuspernode; 64 end 65 
%}}} 66 function md = checkconsistency(cluster,md,solution,analyses) % {{{ 67 67 68 available_queues={'long','normal','debug','devel','alphatst@pbspl233'};69 queue_requirements_time=[5*24*60 8*60 2*60 2*60 24*60];70 queue_requirements_np=[2048 2048 150 150 2048];68 available_queues={'long','normal','debug','devel','alphatst@pbspl233'}; 69 queue_requirements_time=[5*24*60 8*60 2*60 2*60 24*60]; 70 queue_requirements_np=[2048 2048 150 150 2048]; 71 71 72 QueueRequirements(available_queues,queue_requirements_time,queue_requirements_np,cluster.queue,cluster.np,cluster.time)72 QueueRequirements(available_queues,queue_requirements_time,queue_requirements_np,cluster.queue,cluster.np,cluster.time) 73 73 74 %now, check cluster.cpuspernode according to processor type75 if strcmpi(cluster.processor,'wes'),76 if cluster.hyperthreading,77 if ((cluster.cpuspernode>24 ) | (cluster.cpuspernode<1)),78 md = checkmessage(md,'cpuspernode should be between 1 and 24 for ''wes'' processors in hyperthreading mode');79 end80 else81 if ((cluster.cpuspernode>12 ) | (cluster.cpuspernode<1)),82 md = checkmessage(md,'cpuspernode should be between 1 and 12 for ''wes'' processors');83 end84 end85 elseif strcmpi(cluster.processor,'ivy'),86 if cluster.hyperthreading,87 if ((cluster.cpuspernode>40 ) | (cluster.cpuspernode<1)),88 md = checkmessage(md,'cpuspernode should be between 1 and 40 for ''ivy'' processors in hyperthreading mode');89 end90 else91 if ((cluster.cpuspernode>20 ) | (cluster.cpuspernode<1)),92 md = checkmessage(md,'cpuspernode should be between 1 and 20 for ''ivy'' processors');93 end94 end95 elseif strcmpi(cluster.processor,'bro'),96 if cluster.hyperthreading,97 if ((cluster.cpuspernode>56 ) | (cluster.cpuspernode<1)),98 md = checkmessage(md,'cpuspernode should be between 1 and 56 for ''bro'' processors in hyperthreading mode');99 end100 else101 if ((cluster.cpuspernode>28 ) | (cluster.cpuspernode<1)),102 md = checkmessage(md,'cpuspernode should be between 1 and 28 for ''bro'' 
processors');103 end104 end105 elseif strcmpi(cluster.processor,'has'),106 if cluster.hyperthreading,107 if ((cluster.cpuspernode>48 ) | (cluster.cpuspernode<1)),108 md = checkmessage(md,'cpuspernode should be between 1 and 48 for ''has'' processors in hyperthreading mode');109 end110 else111 if ((cluster.cpuspernode>24 ) | (cluster.cpuspernode<1)),112 md = checkmessage(md,'cpuspernode should be between 1 and 24 for ''has'' processors');113 end114 end115 116 elseif strcmpi(cluster.processor,'san'),117 if cluster.hyperthreading,118 if ((cluster.cpuspernode>32 ) | (cluster.cpuspernode<1)),119 md = checkmessage(md,'cpuspernode should be between 1 and 32 for ''san'' processors in hyperthreading mode');120 end121 else122 if ((cluster.cpuspernode>16 ) | (cluster.cpuspernode<1)),123 md = checkmessage(md,'cpuspernode should be between 1 and 16 for ''san'' processors');124 end125 end74 %now, check cluster.cpuspernode according to processor type 75 if strcmpi(cluster.processor,'wes'), 76 if cluster.hyperthreading, 77 if ((cluster.cpuspernode>24 ) | (cluster.cpuspernode<1)), 78 md = checkmessage(md,'cpuspernode should be between 1 and 24 for ''wes'' processors in hyperthreading mode'); 79 end 80 else 81 if ((cluster.cpuspernode>12 ) | (cluster.cpuspernode<1)), 82 md = checkmessage(md,'cpuspernode should be between 1 and 12 for ''wes'' processors'); 83 end 84 end 85 elseif strcmpi(cluster.processor,'ivy'), 86 if cluster.hyperthreading, 87 if ((cluster.cpuspernode>40 ) | (cluster.cpuspernode<1)), 88 md = checkmessage(md,'cpuspernode should be between 1 and 40 for ''ivy'' processors in hyperthreading mode'); 89 end 90 else 91 if ((cluster.cpuspernode>20 ) | (cluster.cpuspernode<1)), 92 md = checkmessage(md,'cpuspernode should be between 1 and 20 for ''ivy'' processors'); 93 end 94 end 95 elseif strcmpi(cluster.processor,'bro'), 96 if cluster.hyperthreading, 97 if ((cluster.cpuspernode>56 ) | (cluster.cpuspernode<1)), 98 md = checkmessage(md,'cpuspernode should be between 1 and 
56 for ''bro'' processors in hyperthreading mode'); 99 end 100 else 101 if ((cluster.cpuspernode>28 ) | (cluster.cpuspernode<1)), 102 md = checkmessage(md,'cpuspernode should be between 1 and 28 for ''bro'' processors'); 103 end 104 end 105 elseif strcmpi(cluster.processor,'has'), 106 if cluster.hyperthreading, 107 if ((cluster.cpuspernode>48 ) | (cluster.cpuspernode<1)), 108 md = checkmessage(md,'cpuspernode should be between 1 and 48 for ''has'' processors in hyperthreading mode'); 109 end 110 else 111 if ((cluster.cpuspernode>24 ) | (cluster.cpuspernode<1)), 112 md = checkmessage(md,'cpuspernode should be between 1 and 24 for ''has'' processors'); 113 end 114 end 115 116 elseif strcmpi(cluster.processor,'san'), 117 if cluster.hyperthreading, 118 if ((cluster.cpuspernode>32 ) | (cluster.cpuspernode<1)), 119 md = checkmessage(md,'cpuspernode should be between 1 and 32 for ''san'' processors in hyperthreading mode'); 120 end 121 else 122 if ((cluster.cpuspernode>16 ) | (cluster.cpuspernode<1)), 123 md = checkmessage(md,'cpuspernode should be between 1 and 16 for ''san'' processors'); 124 end 125 end 126 126 127 else128 md = checkmessage(md,'unknown processor type, should be ''wes'' or ''has'' or ''ivy'' or ''san''');129 end127 else 128 md = checkmessage(md,'unknown processor type, should be ''wes'' or ''has'' or ''ivy'' or ''san'''); 129 end 130 130 131 %Miscelaneous132 if isempty(cluster.login), md = checkmessage(md,'login empty'); end133 if isempty(cluster.srcpath), md = checkmessage(md,'srcpath empty'); end134 if isempty(cluster.codepath), md = checkmessage(md,'codepath empty'); end135 if isempty(cluster.executionpath), md = checkmessage(md,'executionpath empty'); end136 if isempty(cluster.grouplist), md = checkmessage(md,'grouplist empty'); end131 %Miscellaneous 132 if isempty(cluster.login), md = checkmessage(md,'login empty'); end 133 if isempty(cluster.srcpath), md = checkmessage(md,'srcpath empty'); end 134 if isempty(cluster.codepath), md = 
checkmessage(md,'codepath empty'); end 135 if isempty(cluster.executionpath), md = checkmessage(md,'executionpath empty'); end 136 if isempty(cluster.grouplist), md = checkmessage(md,'grouplist empty'); end 137 137 138 end139 %}}}140 function BuildQueueScript(cluster,dirname,modelname,solution,io_gather,isvalgrind,isgprof,isdakota,isoceancoupling) % {{{138 end 139 %}}} 140 function BuildQueueScript(cluster,dirname,modelname,solution,io_gather,isvalgrind,isgprof,isdakota,isoceancoupling) % {{{ 141 141 142 if(isgprof), disp('gprof not supported by cluster, ignoring...'); end142 if(isgprof), disp('gprof not supported by cluster, ignoring...'); end 143 143 144 executable='issm.exe';145 if isdakota,146 version=IssmConfig('_DAKOTA_VERSION_'); version=str2num(version(1:3));147 if (version>=6),148 executable='issm_dakota.exe';149 end150 end151 if isoceancoupling,152 executable='issm_ocean.exe';153 end144 executable='issm.exe'; 145 if isdakota, 146 version=IssmConfig('_DAKOTA_VERSION_'); version=str2num(version(1:3)); 147 if (version>=6), 148 executable='issm_dakota.exe'; 149 end 150 end 151 if isoceancoupling, 152 executable='issm_ocean.exe'; 153 end 154 154 155 %write queuing script156 fid=fopen([modelname '.queue'],'w');157 fprintf(fid,'#PBS -S /bin/bash\n');158 % fprintf(fid,'#PBS -N %s\n',modelname);159 fprintf(fid,'#PBS -l select=%i:ncpus=%i:model=%s\n',cluster.numnodes,cluster.cpuspernode,cluster.processor);160 fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds.161 fprintf(fid,'#PBS -q %s \n',cluster.queue);162 fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist);163 fprintf(fid,'#PBS -m e\n');164 fprintf(fid,'#PBS -o %s.outlog \n',[cluster.executionpath '/' dirname '/' modelname]);165 fprintf(fid,'#PBS -e %s.errlog \n\n',[cluster.executionpath '/' dirname '/' modelname]);166 fprintf(fid,'. 
/usr/share/modules/init/bash\n\n');167 for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end168 fprintf(fid,'export PATH="$PATH:."\n\n');169 fprintf(fid,'export MPI_LAUNCH_TIMEOUT=520\n');170 fprintf(fid,'export MPI_GROUP_MAX=64\n\n');171 fprintf(fid,'export ISSM_DIR="%s"\n',cluster.srcpath); %FIXME172 fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n'); %FIXME173 fprintf(fid,'export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$ISSM_DIR/externalpackages/petsc/install/lib"\n');174 fprintf(fid,'cd %s/%s/\n\n',cluster.executionpath,dirname);175 if ~isvalgrind,176 fprintf(fid,'/u/scicon/tools/bin/several_tries mpiexec -np %i /u/scicon/tools/bin/mbind.x -cs -n%i %s/%s %s %s %s\n',cluster.np,cluster.cpuspernode,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname);177 else178 fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname);179 end180 if ~io_gather, %concatenate the output files:181 fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);182 end183 fclose(fid);155 %write queuing script 156 fid=fopen([modelname '.queue'],'w'); 157 fprintf(fid,'#PBS -S /bin/bash\n'); 158 % fprintf(fid,'#PBS -N %s\n',modelname); 159 fprintf(fid,'#PBS -l select=%i:ncpus=%i:model=%s\n',cluster.numnodes,cluster.cpuspernode,cluster.processor); 160 fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds. 161 fprintf(fid,'#PBS -q %s \n',cluster.queue); 162 fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist); 163 fprintf(fid,'#PBS -m e\n'); 164 fprintf(fid,'#PBS -o %s.outlog \n',[cluster.executionpath '/' dirname '/' modelname]); 165 fprintf(fid,'#PBS -e %s.errlog \n\n',[cluster.executionpath '/' dirname '/' modelname]); 166 fprintf(fid,'. 
/usr/share/modules/init/bash\n\n'); 167 for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end 168 fprintf(fid,'export PATH="$PATH:."\n\n'); 169 fprintf(fid,'export MPI_LAUNCH_TIMEOUT=520\n'); 170 fprintf(fid,'export MPI_GROUP_MAX=64\n\n'); 171 fprintf(fid,'export ISSM_DIR="%s"\n',cluster.srcpath); %FIXME 172 fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n'); %FIXME 173 fprintf(fid,'export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$ISSM_DIR/externalpackages/petsc/install/lib"\n'); 174 fprintf(fid,'cd %s/%s/\n\n',cluster.executionpath,dirname); 175 if ~isvalgrind, 176 fprintf(fid,'/u/scicon/tools/bin/several_tries mpiexec -np %i /u/scicon/tools/bin/mbind.x -cs -n%i %s/%s %s %s %s\n',cluster.np,cluster.cpuspernode,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname); 177 else 178 fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname); 179 end 180 if ~io_gather, %concatenate the output files: 181 fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname); 182 end 183 fclose(fid); 184 184 185 %in interactive mode, create a run file, and errlog and outlog file186 if cluster.interactive,187 fid=fopen([modelname '.run'],'w');188 if cluster.interactive==10,189 fprintf(fid,'module unload mpi-mvapich2/1.4.1/gcc\n');190 fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[pwd() '/run'],modelname);191 else192 if ~isvalgrind,193 fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/Interactive' num2str(cluster.interactive)],modelname);194 else195 fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/Interactive' num2str(cluster.interactive)],modelname);196 end197 end198 if ~io_gather, %concatenate the 
output files:199 fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);200 end201 fclose(fid);202 fid=fopen([modelname '.errlog'],'w');203 fclose(fid);204 fid=fopen([modelname '.outlog'],'w');205 fclose(fid);206 end207 end %}}}208 function BuildQueueScriptMultipleModels(cluster,dirname,modelname,solution,dirnames,modelnames,nps) % {{{185 %in interactive mode, create a run file, and errlog and outlog file 186 if cluster.interactive, 187 fid=fopen([modelname '.run'],'w'); 188 if cluster.interactive==10, 189 fprintf(fid,'module unload mpi-mvapich2/1.4.1/gcc\n'); 190 fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[pwd() '/run'],modelname); 191 else 192 if ~isvalgrind, 193 fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/Interactive' num2str(cluster.interactive)],modelname); 194 else 195 fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/Interactive' num2str(cluster.interactive)],modelname); 196 end 197 end 198 if ~io_gather, %concatenate the output files: 199 fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname); 200 end 201 fclose(fid); 202 fid=fopen([modelname '.errlog'],'w'); 203 fclose(fid); 204 fid=fopen([modelname '.outlog'],'w'); 205 fclose(fid); 206 end 207 end %}}} 208 function BuildQueueScriptMultipleModels(cluster,dirname,modelname,solution,dirnames,modelnames,nps) % {{{ 209 209 210 %some checks:211 if isempty(modelname), error('BuildQueueScriptMultipleModels error message: need a non empty model name!');end210 %some checks: 211 if isempty(modelname), error('BuildQueueScriptMultipleModels error message: need a non empty model name!');end 212 212 213 %what is the executable being called?214 executable='issm_slr.exe';213 %what is the executable being called? 
214 executable='issm_slr.exe'; 215 215 216 if ispc(), error('BuildQueueScriptMultipleModels not support yet on windows machines');end;216 if ispc(), error('BuildQueueScriptMultipleModels not support yet on windows machines');end; 217 217 218 %write queuing script219 fid=fopen([modelname '.queue'],'w');218 %write queuing script 219 fid=fopen([modelname '.queue'],'w'); 220 220 221 fprintf(fid,'#PBS -S /bin/bash\n');222 fprintf(fid,'#PBS -N %s\n',modelname);223 fprintf(fid,'#PBS -l select=%i:ncpus=%i:model=%s\n',cluster.numnodes,cluster.cpuspernode,cluster.processor);224 fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds.225 fprintf(fid,'#PBS -q %s \n',cluster.queue);226 fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist);227 fprintf(fid,'#PBS -m e\n');228 fprintf(fid,'#PBS -o %s.outlog \n',[cluster.executionpath '/' dirname '/' modelname]);229 fprintf(fid,'#PBS -e %s.errlog \n\n',[cluster.executionpath '/' dirname '/' modelname]);230 fprintf(fid,'. /usr/share/modules/init/bash\n\n');231 for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end232 fprintf(fid,'export PATH="$PATH:."\n\n');233 fprintf(fid,'export MPI_GROUP_MAX=64\n\n');234 fprintf(fid,'export ISSM_DIR="%s/../"\n',cluster.codepath); %FIXME235 fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n'); %FIXME236 fprintf(fid,'cd %s/%s/\n\n',cluster.executionpath,dirname);221 fprintf(fid,'#PBS -S /bin/bash\n'); 222 fprintf(fid,'#PBS -N %s\n',modelname); 223 fprintf(fid,'#PBS -l select=%i:ncpus=%i:model=%s\n',cluster.numnodes,cluster.cpuspernode,cluster.processor); 224 fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds. 
225 fprintf(fid,'#PBS -q %s \n',cluster.queue); 226 fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist); 227 fprintf(fid,'#PBS -m e\n'); 228 fprintf(fid,'#PBS -o %s.outlog \n',[cluster.executionpath '/' dirname '/' modelname]); 229 fprintf(fid,'#PBS -e %s.errlog \n\n',[cluster.executionpath '/' dirname '/' modelname]); 230 fprintf(fid,'. /usr/share/modules/init/bash\n\n'); 231 for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end 232 fprintf(fid,'export PATH="$PATH:."\n\n'); 233 fprintf(fid,'export MPI_GROUP_MAX=64\n\n'); 234 fprintf(fid,'export ISSM_DIR="%s/../"\n',cluster.codepath); %FIXME 235 fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n'); %FIXME 236 fprintf(fid,'cd %s/%s/\n\n',cluster.executionpath,dirname); 237 237 238 %number of cpus:239 mpistring=sprintf('mpiexec -np %i ',cluster.numnodes*cluster.cpuspernode);238 %number of cpus: 239 mpistring=sprintf('mpiexec -np %i ',cluster.numnodes*cluster.cpuspernode); 240 240 241 %executable:242 mpistring=[mpistring sprintf('%s/%s ',cluster.codepath,executable)];241 %executable: 242 mpistring=[mpistring sprintf('%s/%s ',cluster.codepath,executable)]; 243 243 244 %solution name:245 mpistring=[mpistring sprintf('%s ',solution)];244 %solution name: 245 mpistring=[mpistring sprintf('%s ',solution)]; 246 246 247 %execution directory and model name:248 mpistring=[mpistring sprintf('%s/%s %s',cluster.executionpath,dirname,modelname)];247 %execution directory and model name: 248 mpistring=[mpistring sprintf('%s/%s %s',cluster.executionpath,dirname,modelname)]; 249 249 250 %inform main executable of how many icecaps, glaciers and earth models are being run:251 mpistring=[mpistring sprintf(' %i ',length(dirnames))];250 %inform main executable of how many icecaps, glaciers and earth models are being run: 251 mpistring=[mpistring sprintf(' %i ',length(dirnames))]; 252 252 253 %icecaps, glaciers and earth location, names and number of processors associated:254 for 
i=1:length(dirnames),255 mpistring=[mpistring sprintf(' %s/%s %s %i ',cluster.executionpath,dirnames{i},modelnames{i},nps{i})];256 end253 %icecaps, glaciers and earth location, names and number of processors associated: 254 for i=1:length(dirnames), 255 mpistring=[mpistring sprintf(' %s/%s %s %i ',cluster.executionpath,dirnames{i},modelnames{i},nps{i})]; 256 end 257 257 258 %write this long string to disk:259 fprintf(fid,mpistring);260 fclose(fid);258 %write this long string to disk: 259 fprintf(fid,mpistring); 260 fclose(fid); 261 261 262 if cluster.interactive,263 fid=fopen([modelname '.run'],'w');262 if cluster.interactive, 263 fid=fopen([modelname '.run'],'w'); 264 264 265 %number of cpus:266 mpistring=sprintf('mpiexec -np %i ',cluster.numnodes*cluster.cpuspernode);265 %number of cpus: 266 mpistring=sprintf('mpiexec -np %i ',cluster.numnodes*cluster.cpuspernode); 267 267 268 %executable:269 mpistring=[mpistring sprintf('%s/%s ',cluster.codepath,executable)];268 %executable: 269 mpistring=[mpistring sprintf('%s/%s ',cluster.codepath,executable)]; 270 270 271 %solution name:272 mpistring=[mpistring sprintf('%s ',solution)];271 %solution name: 272 mpistring=[mpistring sprintf('%s ',solution)]; 273 273 274 %execution directory and model name:275 mpistring=[mpistring sprintf('%s/%s %s',cluster.executionpath,dirname,modelname)];274 %execution directory and model name: 275 mpistring=[mpistring sprintf('%s/%s %s',cluster.executionpath,dirname,modelname)]; 276 276 277 %inform main executable of how many icecaps, glaciers and earth models are being run:278 mpistring=[mpistring sprintf(' %i ',length(dirnames))];277 %inform main executable of how many icecaps, glaciers and earth models are being run: 278 mpistring=[mpistring sprintf(' %i ',length(dirnames))]; 279 279 280 %icecaps, glaciers and earth location, names and number of processors associated:281 for i=1:length(dirnames),282 mpistring=[mpistring sprintf(' %s/Interactive%i %s %i 
',cluster.executionpath,cluster.interactive,modelnames{i},nps{i})];283 end280 %icecaps, glaciers and earth location, names and number of processors associated: 281 for i=1:length(dirnames), 282 mpistring=[mpistring sprintf(' %s/Interactive%i %s %i ',cluster.executionpath,cluster.interactive,modelnames{i},nps{i})]; 283 end 284 284 285 %write this long string to disk:286 fprintf(fid,mpistring);287 fclose(fid);285 %write this long string to disk: 286 fprintf(fid,mpistring); 287 fclose(fid); 288 288 289 fid=fopen([modelname '.errlog'],'w');290 fclose(fid);291 fid=fopen([modelname '.outlog'],'w');292 fclose(fid);293 end294 end295 %}}}296 function BuildKrigingQueueScript(cluster,modelname,solution,io_gather,isvalgrind,isgprof) % {{{289 fid=fopen([modelname '.errlog'],'w'); 290 fclose(fid); 291 fid=fopen([modelname '.outlog'],'w'); 292 fclose(fid); 293 end 294 end 295 %}}} 296 function BuildKrigingQueueScript(cluster,modelname,solution,io_gather,isvalgrind,isgprof) % {{{ 297 297 298 if(isgprof), disp('gprof not supported by cluster, ignoring...'); end298 if(isgprof), disp('gprof not supported by cluster, ignoring...'); end 299 299 300 %write queuing script301 fid=fopen([modelname '.queue'],'w');302 fprintf(fid,'#PBS -S /bin/bash\n');303 %fprintf(fid,'#PBS -N %s\n',modelname);304 fprintf(fid,'#PBS -l select=%i:ncpus=%i:model=%s\n',cluster.numnodes,cluster.cpuspernode,cluster.processor);305 fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds.306 fprintf(fid,'#PBS -q %s \n',cluster.queue);307 fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist);308 fprintf(fid,'#PBS -m e\n');309 fprintf(fid,'#PBS -o %s.outlog \n',modelname);310 fprintf(fid,'#PBS -e %s.errlog \n\n',modelname);311 fprintf(fid,'. 
/usr/share/modules/init/bash\n\n');312 for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end313 fprintf(fid,'export PATH="$PATH:."\n');314 fprintf(fid,'export ISSM_DIR="%s/../"\n',cluster.codepath); %FIXME315 fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n'); %FIXME316 fprintf(fid,'export MPI_GROUP_MAX=64\n\n');317 fprintf(fid,'cd %s/%s/\n\n',cluster.executionpath,modelname);318 fprintf(fid,'mpiexec -np %i %s/kriging.exe %s %s\n',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname); %FIXME319 if ~io_gather, %concatenate the output files:320 fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);321 end322 fclose(fid);300 %write queuing script 301 fid=fopen([modelname '.queue'],'w'); 302 fprintf(fid,'#PBS -S /bin/bash\n'); 303 % fprintf(fid,'#PBS -N %s\n',modelname); 304 fprintf(fid,'#PBS -l select=%i:ncpus=%i:model=%s\n',cluster.numnodes,cluster.cpuspernode,cluster.processor); 305 fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds. 306 fprintf(fid,'#PBS -q %s \n',cluster.queue); 307 fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist); 308 fprintf(fid,'#PBS -m e\n'); 309 fprintf(fid,'#PBS -o %s.outlog \n',modelname); 310 fprintf(fid,'#PBS -e %s.errlog \n\n',modelname); 311 fprintf(fid,'. 
/usr/share/modules/init/bash\n\n'); 312 for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end 313 fprintf(fid,'export PATH="$PATH:."\n'); 314 fprintf(fid,'export ISSM_DIR="%s/../"\n',cluster.codepath); %FIXME 315 fprintf(fid,'source $ISSM_DIR/etc/environment.sh\n'); %FIXME 316 fprintf(fid,'export MPI_GROUP_MAX=64\n\n'); 317 fprintf(fid,'cd %s/%s/\n\n',cluster.executionpath,modelname); 318 fprintf(fid,'mpiexec -np %i %s/kriging.exe %s %s\n',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname); %FIXME 319 if ~io_gather, %concatenate the output files: 320 fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname); 321 end 322 fclose(fid); 323 323 324 %in interactive mode, create a run file, and errlog and outlog file325 if cluster.interactive,326 fid=fopen([modelname '.run'],'w');327 if ~isvalgrind,328 fprintf(fid,'mpiexec -np %i %s/kriging.exe %s %s\n',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname);329 else330 fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/kriging.exe %s %s\n',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname);331 end332 if ~io_gather, %concatenate the output files:333 fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);334 end335 fclose(fid);336 fid=fopen([modelname '.errlog'],'w');337 fclose(fid);338 fid=fopen([modelname '.outlog'],'w');339 fclose(fid);340 end341 end %}}}342 function BuildOceanQueueScript(np,cluster,modelname) % {{{324 %in interactive mode, create a run file, and errlog and outlog file 325 if cluster.interactive, 326 fid=fopen([modelname '.run'],'w'); 327 if ~isvalgrind, 328 fprintf(fid,'mpiexec -np %i %s/kriging.exe %s %s\n',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname); 329 else 330 fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/kriging.exe %s %s\n',cluster.np,cluster.codepath,[cluster.executionpath '/' modelname],modelname); 331 end 332 if 
~io_gather, %concatenate the output files: 333 fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname); 334 end 335 fclose(fid); 336 fid=fopen([modelname '.errlog'],'w'); 337 fclose(fid); 338 fid=fopen([modelname '.outlog'],'w'); 339 fclose(fid); 340 end 341 end %}}} 342 function BuildOceanQueueScript(np,cluster,modelname) % {{{ 343 343 344 %write queuing script345 fid=fopen([modelname '.queue'],'w');346 fprintf(fid,'#PBS -S /bin/bash\n');347 fprintf(fid,'#PBS -l select=1:ncpus=%i:model=%s\n',np,cluster.processor);348 fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds.349 fprintf(fid,'#PBS -q %s \n',cluster.queue);350 fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist);351 fprintf(fid,'#PBS -m e\n');352 fprintf(fid,'#PBS -o %s.outlog \n',modelname);353 fprintf(fid,'#PBS -e %s.errlog \n\n',modelname);354 fprintf(fid,'. /usr/share/modules/init/bash\n\n');355 %for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end %FIXME: should use this!356 fprintf(fid,'module load comp-intel/2016.2.181\n');357 fprintf(fid,'module load netcdf/4.4.1.1_mpt\n');358 fprintf(fid,'module load mpi-sgi/mpt.2.15r20\n');359 fprintf(fid,'export PATH="$PATH:."\n');360 fprintf(fid,'export MPI_GROUP_MAX=64\n\n');361 fprintf(fid,['cd ' pwd() ' \n\n']);362 fprintf(fid,'mpiexec -np %i ./mitgcmuv\n',np);363 % if ~io_gather, %concatenate the output files:364 % fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname);365 % end366 fclose(fid);344 %write queuing script 345 fid=fopen([modelname '.queue'],'w'); 346 fprintf(fid,'#PBS -S /bin/bash\n'); 347 fprintf(fid,'#PBS -l select=1:ncpus=%i:model=%s\n',np,cluster.processor); 348 fprintf(fid,'#PBS -l walltime=%i\n',cluster.time*60); %walltime is in seconds. 
349 fprintf(fid,'#PBS -q %s \n',cluster.queue); 350 fprintf(fid,'#PBS -W group_list=%s\n',cluster.grouplist); 351 fprintf(fid,'#PBS -m e\n'); 352 fprintf(fid,'#PBS -o %s.outlog \n',modelname); 353 fprintf(fid,'#PBS -e %s.errlog \n\n',modelname); 354 fprintf(fid,'. /usr/share/modules/init/bash\n\n'); 355 %for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end %FIXME: should use this! 356 fprintf(fid,'module load comp-intel/2016.2.181\n'); 357 fprintf(fid,'module load netcdf/4.4.1.1_mpt\n'); 358 fprintf(fid,'module load mpi-sgi/mpt.2.15r20\n'); 359 fprintf(fid,'export PATH="$PATH:."\n'); 360 fprintf(fid,'export MPI_GROUP_MAX=64\n\n'); 361 fprintf(fid,['cd ' pwd() ' \n\n']); 362 fprintf(fid,'mpiexec -np %i ./mitgcmuv\n',np); 363 % if ~io_gather, %concatenate the output files: 364 % fprintf(fid,'cat %s.outbin.* > %s.outbin',modelname,modelname); 365 % end 366 fclose(fid); 367 367 368 %in interactive mode, create a run file, and errlog and outlog file369 if cluster.interactive,370 fid=fopen([modelname '.run'],'w');371 fprintf(fid,'module load mpi-sgi/mpt.2.15r20\n');372 fprintf(fid,['mpiexec -np %i ./mitgcmuv \n'],np);373 fprintf(fid,['touch ' modelname '.lock %s\n']);374 fclose(fid);375 fid=fopen([modelname '.errlog'],'w');376 fclose(fid);377 fid=fopen([modelname '.outlog'],'w');378 fclose(fid);379 end368 %in interactive mode, create a run file, and errlog and outlog file 369 if cluster.interactive, 370 fid=fopen([modelname '.run'],'w'); 371 fprintf(fid,'module load mpi-sgi/mpt.2.15r20\n'); 372 fprintf(fid,['mpiexec -np %i ./mitgcmuv \n'],np); 373 fprintf(fid,['touch ' modelname '.lock %s\n']); 374 fclose(fid); 375 fid=fopen([modelname '.errlog'],'w'); 376 fclose(fid); 377 fid=fopen([modelname '.outlog'],'w'); 378 fclose(fid); 379 end 380 380 381 end %}}}382 function UploadQueueJob(cluster,modelname,dirname,filelist)% {{{381 end %}}} 382 function UploadQueueJob(cluster,modelname,dirname,filelist)% {{{ 383 383 384 %compress the files 
into one zip.385 compressstring=['tar -zcf ' dirname '.tar.gz '];386 for i=1:numel(filelist),387 compressstring = [compressstring ' ' filelist{i}];388 end389 if cluster.interactive,390 compressstring = [compressstring ' ' modelname '.run ' modelname '.errlog ' modelname '.outlog '];391 end392 system(compressstring);384 %compress the files into one zip. 385 compressstring=['tar -zcf ' dirname '.tar.gz ']; 386 for i=1:numel(filelist), 387 compressstring = [compressstring ' ' filelist{i}]; 388 end 389 if cluster.interactive, 390 compressstring = [compressstring ' ' modelname '.run ' modelname '.errlog ' modelname '.outlog ']; 391 end 392 system(compressstring); 393 393 394 disp('uploading input file and queueing script');395 if cluster.interactive==10,396 directory=[pwd() '/run/'];397 elseif cluster.interactive,398 directory=[cluster.executionpath '/Interactive' num2str(cluster.interactive)];399 else400 directory=cluster.executionpath;401 end394 disp('uploading input file and queueing script'); 395 if cluster.interactive==10, 396 directory=[pwd() '/run/']; 397 elseif cluster.interactive, 398 directory=[cluster.executionpath '/Interactive' num2str(cluster.interactive)]; 399 else 400 directory=cluster.executionpath; 401 end 402 402 403 if ~cluster.bbftp,404 issmscpout(cluster.name,directory,cluster.login,cluster.port,{[dirname '.tar.gz']});405 else406 issmbbftpout(cluster.name,directory,cluster.login,cluster.port,cluster.numstreams,{[dirname '.tar.gz']});407 end403 if ~cluster.bbftp, 404 issmscpout(cluster.name,directory,cluster.login,cluster.port,{[dirname '.tar.gz']}); 405 else 406 issmbbftpout(cluster.name,directory,cluster.login,cluster.port,cluster.numstreams,{[dirname '.tar.gz']}); 407 end 408 408 409 end410 %}}}411 function LaunchQueueJob(cluster,modelname,dirname,filelist,restart,batch)% {{{409 end 410 %}}} 411 function LaunchQueueJob(cluster,modelname,dirname,filelist,restart,batch)% {{{ 412 412 413 %lauch command, to be executed via ssh414 if 
~cluster.interactive,415 if ~isempty(restart)416 launchcommand=['cd ' cluster.executionpath ' && cd ' dirname ' && qsub ' modelname '.queue '];417 else418 launchcommand=['cd ' cluster.executionpath ' && rm -rf ./' dirname ' && mkdir ' dirname ...419 ' && cd ' dirname ' && mv ../' dirname '.tar.gz ./ && tar -zxf ' dirname '.tar.gz && qsub ' modelname '.queue '];420 end421 else422 if ~isempty(restart)423 launchcommand=['cd ' cluster.executionpath '/Interactive' num2str(cluster.interactive)];424 else425 if cluster.interactive==10,426 launchcommand=['cd ' pwd() '/run && tar -zxf ' dirname '.tar.gz'];427 else428 launchcommand=['cd ' cluster.executionpath '/Interactive' num2str(cluster.interactive) ' && tar -zxf ' dirname '.tar.gz'];429 end430 end431 end413 %lauch command, to be executed via ssh 414 if ~cluster.interactive, 415 if ~isempty(restart) 416 launchcommand=['cd ' cluster.executionpath ' && cd ' dirname ' && qsub ' modelname '.queue ']; 417 else 418 launchcommand=['cd ' cluster.executionpath ' && rm -rf ./' dirname ' && mkdir ' dirname ... 
419 ' && cd ' dirname ' && mv ../' dirname '.tar.gz ./ && tar -zxf ' dirname '.tar.gz && qsub ' modelname '.queue ']; 420 end 421 else 422 if ~isempty(restart) 423 launchcommand=['cd ' cluster.executionpath '/Interactive' num2str(cluster.interactive)]; 424 else 425 if cluster.interactive==10, 426 launchcommand=['cd ' pwd() '/run && tar -zxf ' dirname '.tar.gz']; 427 else 428 launchcommand=['cd ' cluster.executionpath '/Interactive' num2str(cluster.interactive) ' && tar -zxf ' dirname '.tar.gz']; 429 end 430 end 431 end 432 432 433 disp('launching solution sequence on remote cluster');434 issmssh(cluster.name,cluster.login,cluster.port,launchcommand);435 end436 %}}}437 function Download(cluster,dirname,filelist)% {{{433 disp('launching solution sequence on remote cluster'); 434 issmssh(cluster.name,cluster.login,cluster.port,launchcommand); 435 end 436 %}}} 437 function Download(cluster,dirname,filelist)% {{{ 438 438 439 %copy files from cluster to current directory440 if cluster.interactive==10,441 directory=[pwd() '/run/'];442 elseif ~cluster.interactive,443 directory=[cluster.executionpath '/' dirname '/'];444 else445 directory=[cluster.executionpath '/Interactive' num2str(cluster.interactive) '/'];446 end439 %copy files from cluster to current directory 440 if cluster.interactive==10, 441 directory=[pwd() '/run/']; 442 elseif ~cluster.interactive, 443 directory=[cluster.executionpath '/' dirname '/']; 444 else 445 directory=[cluster.executionpath '/Interactive' num2str(cluster.interactive) '/']; 446 end 447 447 448 if ~cluster.bbftp,449 issmscpin(cluster.name,cluster.login,cluster.port,directory,filelist);450 else451 issmbbftpin(cluster.name, cluster.login, cluster.port, cluster.numstreams, directory, filelist);452 end448 if ~cluster.bbftp, 449 issmscpin(cluster.name,cluster.login,cluster.port,directory,filelist); 450 else 451 issmbbftpin(cluster.name, cluster.login, cluster.port, cluster.numstreams, directory, filelist); 452 end 453 453 454 end %}}}454 end %}}} 
455 455 end 456 456 end  - 
      
../trunk-jpl/src/m/classes/clusters/pfe.py
31 31 self.queue = 'long' 32 32 self.time = 12 * 60 33 33 self.processor = 'wes' 34 self.srcpath = '' 34 35 self.codepath = '' 35 36 self.executionpath = '' 36 37 self.grouplist = 's1010' … … 60 61 s = "%s\n%s" % (s, fielddisplay(self, 'cpuspernode', 'number of nodes per CPUs')) 61 62 s = "%s\n%s" % (s, fielddisplay(self, 'np', 'number of CPUs')) 62 63 s = "%s\n%s" % (s, fielddisplay(self, 'port', 'machine access port')) 63 s = "%s\n%s" % (s, fielddisplay(self, 'codepath', 'code path on the cluster'))64 s = "%s\n%s" % (s, fielddisplay(self, 'executionpath', 'execution path on the cluster'))65 64 s = "%s\n%s" % (s, fielddisplay(self, 'queue', 'name of the queue')) 66 65 s = "%s\n%s" % (s, fielddisplay(self, 'time', 'walltime requested')) 67 66 s = "%s\n%s" % (s, fielddisplay(self, 'processor', 'type of processor')) 67 s = "%s\n%s" % (s, fielddisplay(self, 'codepath', '$ISSM_DIR on pfe')) 68 s = "%s\n%s" % (s, fielddisplay(self, 'executionpath', 'directory containing issm.exe on pfe')) 68 69 s = "%s\n%s" % (s, fielddisplay(self, 'grouplist', 'name of the group')) 69 70 s = "%s\n%s" % (s, fielddisplay(self, 'interactive', '')) 70 71 s = "%s\n%s" % (s, fielddisplay(self, 'bbftp', '')) … … 112 113 else: 113 114 md = md.checkmessage('unknown processor type, should be ''neh'', ''wes'' or ''har'' or ''ivy''') 114 115 115 #Miscelaneous116 #Miscellaneous 116 117 if not self.login: 117 118 md = md.checkmessage('login empty') 119 if not self.srcpath: 120 md = md.checkmessage('srcpath empty') 118 121 if not self.codepath: 119 122 md = md.checkmessage('codepath empty') 120 123 if not self.executionpath: … … 152 155 fid.write('module load mpi-sgi/mpt.2.11r13\n') 153 156 fid.write('export PATH="$PATH:."\n\n') 154 157 fid.write('export MPI_GROUP_MAX=64\n\n') 155 fid.write('export ISSM_DIR="%s /../ "\n' % self.codepath)158 fid.write('export ISSM_DIR="%s"\n' % self.srcpath) 156 159 fid.write('source $ISSM_DIR/etc/environment.sh\n') 160 fid.write('export 
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$ISSM_DIR/externalpackages/petsc/install/lib"\n') 157 161 fid.write('cd %s/%s/ \n\n' % (self.executionpath, dirname)) 158 162 fid.write('mpiexec - np %i %s/%s %s %s/%s %s\n' % (self.nprocs(), self.codepath, executable, str(solution), self.executionpath, dirname, modelname)) 159 163  - 
      
../trunk-jpl/src/m/classes/fourierlove.py
64 64 self.frequencies = [0] #Hz 65 65 self.sh_nmax = 256 # .35 degree, 40 km at the equator. 66 66 self.sh_nmin = 1 67 self.g0 = 10 # m /s^268 self.r0 = 637 8e3#m69 self.mu0 = 1e11 # Pa67 self.g0 = 9.81 # m/s^2 68 self.r0 = 6371e3 #m 69 self.mu0 = 1e11 # Pa 70 70 self.allow_layer_deletion = 1 71 71 self.love_kernels = 0 72 72 self.forcing_type = 11  - 
      
../trunk-jpl/test/Par/GiaIvinsBenchmarksAB.py
11 11 from verbose import * 12 12 from SetIceSheetBC import * 13 13 14 14 15 rad = 600000. 15 16 nv = md.mesh.numberofvertices 16 17 if (np.isnan(md.geometry.thickness)): … … 20 21 if (dist <= rad): 21 22 md.geometry.thickness[i] = 2000.0 22 23 else: 23 md.geometry.thickness[i] = 1.0 # non-zero thickness24 md.geometry.thickness[i] = 0 24 25 25 26 md.geometry.thickness = md.geometry.thickness.reshape(-1, 1) 26 27 md.geometry.base = np.zeros((md.mesh.numberofvertices, 1))  - 
      
../trunk-jpl/test/Par/GiaIvinsBenchmarksCD.py
1 1 #Geometry specific to Experiments C and D 2 3 2 import inspect 4 3 import os.path 5 4 … … 22 21 if (dist <= rad): 23 22 md.geometry.thickness[i] = 3000.0 24 23 else: 25 md.geometry.thickness[i] = 1.0 # non - zero thickness24 md.geometry.thickness[i] = 0 26 25 27 26 md.geometry.thickness = md.geometry.thickness.reshape(-1, 1) 28 27 md.geometry.base = np.zeros((md.mesh.numberofvertices, 1))  - 
      
../trunk-jpl/test/NightlyRun/test2001.py
31 31 #Define loading history (see test2001.m for the description) 32 32 md.timestepping.start_time = 2400000 # 2, 400 kyr 33 33 md.timestepping.final_time = 2500000 # 2, 500 kyr 34 md.geometry.thickness = np.vstack((np.hstack((md.geometry.thickness * 0.0, 0.0)), 35 np.hstack((md.geometry.thickness / 2.0, 0.1)), 36 np.hstack((md.geometry.thickness, 0.2)), 37 np.hstack((md.geometry.thickness, 1.0)), 38 np.hstack((md.geometry.thickness, md.timestepping.start_time)))).T 34 md.geometry.thickness = np.array([ 35 np.append(md.geometry.thickness * 0.0, 0.0), 36 np.append(md.geometry.thickness / 2.0, 0.1), 37 np.append(md.geometry.thickness, 0.2), 38 np.append(md.geometry.thickness, 1.0), 39 np.append(md.geometry.thickness, md.timestepping.start_time) 40 ]).T 39 41 40 42 #Solve for GIA deflection 41 43 md.cluster = generic('name', gethostname(), 'np', 3)  - 
      
../trunk-jpl/test/NightlyRun/test2051.m
23 23 24 24 % find out elements that have zero loads throughout the loading history. 25 25 pos = find(sum(abs(md.geometry.thickness(1:end-1,:)),2)==0); 26 md.mask.ice_levelset(pos)=1; % no -ice.26 md.mask.ice_levelset(pos)=1; % no ice 27 27 28 28 md.cluster=generic('name',oshostname(),'np',3); 29 29 md.verbose=verbose('1111111'); … … 37 37 38 38 %Test Name: GiaIvinsBenchmarksAB2dA2 39 39 %% different evaluation time. {{{ 40 md.timestepping.start_time=2005100; % after 5 kyr of deglaciation 41 md.geometry.thickness(end,end) = md.timestepping.start_time; 40 md.timestepping.start_time=2005100; % after 5 kyr of deglaciation 41 md.geometry.thickness(end,end) = md.timestepping.start_time; 42 42 43 43 md=solve(md,'Gia'); 44 44  - 
      
../trunk-jpl/test/NightlyRun/test2053.m
8 8 md.gia.cross_section_shape=2; % for elliptical edge 9 9 10 10 % evaluation time (termed start_time) 11 md.timestepping.start_time=0.3; % for t \approx 0 kyr : to get el eastic response!11 md.timestepping.start_time=0.3; % for t \approx 0 kyr : to get elastic response! 12 12 md.timestepping.final_time=2500000; % 2,500 kyr 13 13 14 14 %% define loading history {{{ … … 22 22 23 23 % find out elements that have zero loads throughout the loading history. 24 24 pos = find(sum(abs(md.geometry.thickness(1:end-1,:)),2)==0); 25 md.mask.ice_levelset(pos)=1; % no -ice.25 md.mask.ice_levelset(pos)=1; % no ice 26 26 27 27 md.cluster=generic('name',oshostname(),'np',3); 28 28 md.verbose=verbose('1111111');  - 
      
../trunk-jpl/test/NightlyRun/test2051.py
1 #Test Name: GiaIvinsBenchmarksAB2dA 11 #Test Name: GiaIvinsBenchmarksAB2dA 2 2 from socket import gethostname 3 3 4 4 import numpy as np … … 16 16 md = parameterize(md, '../Par/GiaIvinsBenchmarksAB.py') 17 17 18 18 # indicate what you want to compute 19 md.gia.cross_section_shape = 1 # for square-edged x -section19 md.gia.cross_section_shape = 1 # for square-edged x-section 20 20 21 # define loading history21 # evaluation time (termed start_time) 22 22 md.timestepping.start_time = 2002100 # after 2 kyr of deglaciation 23 md.timestepping.final_time = 2500000 # 2, 500 kyr 24 md.geometry.thickness = np.array([np.append(md.geometry.thickness * 0.0, 0.0), 25 np.append(md.geometry.thickness, 1000), 26 np.append(md.geometry.thickness, 2000000), 27 np.append(md.geometry.thickness * 0.0, 2000100), 28 np.append(md.geometry.thickness * 0.0, md.timestepping.start_time)]).T 23 md.timestepping.final_time = 2500000 # 2,500 kyr 29 24 30 # solve for GIA deflection 25 # define loading history 26 md.geometry.thickness = np.array([ 27 np.append(md.geometry.thickness * 0.0, 0.0), 28 np.append(md.geometry.thickness, 1000), 29 np.append(md.geometry.thickness, 2000000), 30 np.append(md.geometry.thickness * 0.0, 2000100), 31 np.append(md.geometry.thickness * 0.0, md.timestepping.start_time) 32 ]).T 33 34 # find out the elements that have zero loads throughout the loading history 35 pos = np.where(np.abs(md.geometry.thickness[0:-2, :].sum(axis=1)) == 0)[0] 36 md.mask.ice_levelset[pos] = 1 # no ice 37 31 38 md.cluster = generic('name', gethostname(), 'np', 3) 32 39 md.verbose = verbose('1111111') 40 41 # solve for GIA deflection 33 42 md = solve(md, 'Gia') 34 43 35 #Fields and tolerances to track changes 36 field_names = ['UGia', 'UGiaRate'] 37 field_tolerances = [1e-13, 1e-13] 38 field_values = [md.results.GiaSolution.UGia, md.results.GiaSolution.UGiaRate] 44 # Test Name: GiaIvinsBenchmarksAB2dA1 45 U_AB2dA1 = md.results.GiaSolution.UGia 46 URate_AB2dA1 = md.results.GiaSolution.UGiaRate 
47 48 # Test Name: GiaIvinsBenchmarksAB2dA2 49 # different evaluation time # {{{ 50 md.timestepping.start_time = 2005100 # after 5 kyr of deglaciation 51 md.geometry.thickness[-1, -1] = md.timestepping.start_time 52 53 md = solve(md, 'Gia') 54 55 U_AB2dA2 = md.results.GiaSolution.UGia 56 URate_AB2dA2 = md.results.GiaSolution.UGiaRate 57 # }}} 58 59 # Test Name: GiaIvinsBenchmarksAB2dA3 60 # different evaluation time # {{{ 61 md.timestepping.start_time = 2010100 # after 10 kyr of deglaciation 62 md.geometry.thickness[-1, -1] = md.timestepping.start_time 63 64 md = solve(md, 'Gia') 65 66 U_AB2dA3 = md.results.GiaSolution.UGia 67 URate_AB2dA3 = md.results.GiaSolution.UGiaRate 68 # }}} 69 70 # Fields and tolerances to track changes 71 field_names = ['U_AB2dA1','URate_AB2dA1','U_AB2dA2','URate_AB2dA2','U_AB2dA3','URate_AB2dA3'] 72 field_tolerances = [1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13] 73 field_values = [U_AB2dA1, URate_AB2dA1, U_AB2dA2, URate_AB2dA2, U_AB2dA3, URate_AB2dA3]  - 
      
../trunk-jpl/test/NightlyRun/test2052.py
1 #Test Name: GiaIvinsBenchmarksAB2dC 11 #Test Name: GiaIvinsBenchmarksAB2dC 2 2 from socket import gethostname 3 3 4 4 import numpy as np … … 10 10 from triangle import * 11 11 12 12 13 # Benchmark experiments (Figure A2cIvins and James, 1999, Geophys. J. Int.)14 md = triangle(model(), '../Exp/RoundFrontEISMINT.exp', 200000 )13 # Benchmark experiments (Figure A2a Ivins and James, 1999, Geophys. J. Int.) 14 md = triangle(model(), '../Exp/RoundFrontEISMINT.exp', 200000.) 15 15 md = setmask(md, '', '') 16 16 md = parameterize(md, '../Par/GiaIvinsBenchmarksCD.py') 17 17 18 # indicate what you want to compute18 # indicate what you want to compute 19 19 md.gia.cross_section_shape = 1 # for square-edged x-section 20 20 21 # define loading history22 md.timestepping.start_time = 0.3 # for t \approx 0 kyr : to get el eastic response!21 # evaluation time (termed start_time) 22 md.timestepping.start_time = 0.3 # for t \approx 0 kyr : to get elastic response! 23 23 md.timestepping.final_time = 2500000 # 2,500 kyr 24 md.geometry.thickness = np.array([np.append(md.geometry.thickness * 0.0, 0.0),25 np.append(md.geometry.thickness / 2.0, 0.1),26 np.append(md.geometry.thickness, 0.2),27 np.append(md.geometry.thickness, md.timestepping.start_time)]).T28 24 29 #solve for GIA deflection 25 # define loading history 26 md.geometry.thickness = np.array([ 27 np.append(md.geometry.thickness * 0.0, 0.0), 28 np.append(md.geometry.thickness / 2.0, 0.1), 29 np.append(md.geometry.thickness, 0.2), 30 np.append(md.geometry.thickness, md.timestepping.start_time) 31 ]).T 32 33 # find out elements that have zero loads throughout the loading history 34 pos = np.where(np.abs(md.geometry.thickness[0:-2, :].sum(axis=1)) == 0)[0] 35 md.mask.ice_levelset[pos] = 1 # no ice 36 30 37 md.cluster = generic('name', gethostname(), 'np', 3) 31 38 md.verbose = verbose('1111111') 39 40 # solve for GIA deflection 32 41 md = solve(md, 'Gia') 33 42 34 #Fields and tolerances to track changes 35 field_names = ['UGia', 
'UGiaRate'] 36 field_tolerances = [1e-13, 1e-13] 37 field_values = [md.results.GiaSolution.UGia, md.results.GiaSolution.UGiaRate] 43 # Test Name: GiaIvinsBenchmarksAB2dC1 44 U_AB2dC1 = md.results.GiaSolution.UGia 45 URate_AB2dC1 = md.results.GiaSolution.UGiaRate 46 47 # Test Name: GiaIvinsBenchmarksAB2dC2 48 # different evaluation time # {{{ 49 md.timestepping.start_time = 1000.3 # for t \approx 1 kyr 50 md.geometry.thickness[-1, -1] = md.timestepping.start_time 51 52 md = solve(md, 'Gia') 53 54 U_AB2dC2 = md.results.GiaSolution.UGia 55 URate_AB2dC2 = md.results.GiaSolution.UGiaRate 56 # }}} 57 58 # Test Name: GiaIvinsBenchmarksAB2dC3 59 # different evaluation time # {{{ 60 md.timestepping.start_time = 2400000 # for t \approx \infty 61 md.geometry.thickness[-1, -1] = md.timestepping.start_time 62 63 md = solve(md, 'Gia') 64 65 U_AB2dC3 = md.results.GiaSolution.UGia 66 URate_AB2dC3 = md.results.GiaSolution.UGiaRate 67 # }}} 68 69 # Fields and tolerances to track changes 70 field_names = ['U_AB2dC1', 'URate_AB2dC1', 'U_AB2dC2', 'URate_AB2dC2', 'U_AB2dC3', 'URate_AB2dC3'] 71 field_tolerances = [1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13] 72 field_values = [U_AB2dC1, URate_AB2dC1, U_AB2dC2, URate_AB2dC2, U_AB2dC3, URate_AB2dC3]  - 
      
../trunk-jpl/test/NightlyRun/test2053.py
1 #Test Name: GiaIvinsBenchmarksAB2dD 11 #Test Name: GiaIvinsBenchmarksAB2dD 2 2 from socket import gethostname 3 3 4 4 import numpy as np … … 10 10 from triangle import * 11 11 12 12 13 # Benchmark experiments (Figure A2cIvins and James, 1999, Geophys. J. Int.)13 # Benchmark experiments (Figure A2a Ivins and James, 1999, Geophys. J. Int.) 14 14 md = triangle(model(), '../Exp/RoundFrontEISMINT.exp', 200000) 15 15 md = setmask(md, '', '') 16 16 md = parameterize(md, '../Par/GiaIvinsBenchmarksCD.py') 17 17 18 # indicate what you want to compute19 md.gia.cross_section_shape = 2 # for square-edged x - section18 # indicate what you want to compute 19 md.gia.cross_section_shape = 2 # for elliptical edge 20 20 21 #define loading history 22 md.timestepping.start_time = 0.3 # for t \approx 0 kyr : to get eleastic response! 23 md.timestepping.final_time = 2500000 # 2, 500 kyr 24 md.geometry.thickness = np.array([np.append(md.geometry.thickness * 0.0, 0.0), 25 np.append(md.geometry.thickness / 2.0, 0.1), 26 np.append(md.geometry.thickness, 0.2), 27 np.append(md.geometry.thickness, md.timestepping.start_time)]).T 21 # evaluation time (termed start_time) 22 md.timestepping.start_time = 0.3 # for t \approx 0 kyr : to get elastic response! 
23 md.timestepping.final_time = 2500000 # 2,500 kyr 28 24 29 #solve for GIA deflection 25 # define loading history 26 md.geometry.thickness = np.array([ 27 np.append(md.geometry.thickness * 0.0, 0.0), 28 np.append(md.geometry.thickness / 2.0, 0.1), 29 np.append(md.geometry.thickness, 0.2), 30 np.append(md.geometry.thickness, md.timestepping.start_time) 31 ]).T 32 33 # find out elements that have zero loads throughout the loading history 34 pos = np.where(np.abs(md.geometry.thickness[0:-2, :].sum(axis=1)) == 0)[0] 35 md.mask.ice_levelset[pos] = 1 # no ice 36 30 37 md.cluster = generic('name', gethostname(), 'np', 3) 31 38 md.verbose = verbose('1111111') 39 40 # solve for GIA deflection 32 41 md = solve(md, 'Gia') 33 42 34 #Fields and tolerances to track changes 35 field_names = ['UGia', 'UGiaRate'] 36 field_tolerances = [1e-13, 1e-13] 37 field_values = [md.results.GiaSolution.UGia, md.results.GiaSolution.UGiaRate] 43 # Test Name: GiaIvinsBenchmarksAB2dD1 44 U_AB2dD1 = md.results.GiaSolution.UGia 45 URate_AB2dD1 = md.results.GiaSolution.UGiaRate 46 47 # Test Name: GiaIvinsBenchmarksAB2dD2 48 # different evaluation time # {{{ 49 md.timestepping.start_time = 1000.3 # for t \approx 1 kyr 50 md.geometry.thickness[-1, -1] = md.timestepping.start_time 51 52 md = solve(md, 'Gia') 53 54 U_AB2dD2 = md.results.GiaSolution.UGia 55 URate_AB2dD2 = md.results.GiaSolution.UGiaRate 56 # }}} 57 58 # Test Name: GiaIvinsBenchmarksAB2dD3 59 # different evaluation time # {{{ 60 md.timestepping.start_time = 2400000 # for t \approx \infty 61 md.geometry.thickness[-1, -1] = md.timestepping.start_time 62 63 md = solve(md, 'Gia') 64 65 U_AB2dD3 = md.results.GiaSolution.UGia 66 URate_AB2dD3 = md.results.GiaSolution.UGiaRate 67 # }}} 68 69 # Fields and tolerances to track changes 70 field_names = ['U_AB2dD1', 'URate_AB2dD1', 'U_AB2dD2', 'URate_AB2dD2', 'U_AB2dD3', 'URate_AB2dD3'] 71 field_tolerances = [1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13] 72 field_values = [U_AB2dD1, URate_AB2dD1, 
U_AB2dD2, URate_AB2dD2, U_AB2dD3, URate_AB2dD3] 73  - 
      
../trunk-jpl/src/m/solve/WriteData.py
1 from copy import deepcopy 1 2 from struct import pack, error 2 3 import numpy as np 3 4 import pairoptions … … 38 39 # data = full(data) 39 40 # end 40 41 42 # Always make a copy of the the data so that we do not accidently overwrite 43 # any model fields. 44 data = deepcopy(data) 45 41 46 #Scale data if necesarry 42 47 if options.exist('scale'): 43 48 data = np.array(data) … … 147 152 # }}} 148 153 149 154 elif datatype == 'DoubleMat': # {{{ 150 151 155 if isinstance(data, (bool, int, float)): 152 156 data = np.array([data]) 153 157 elif isinstance(data, (list, tuple)): … … 170 174 try: 171 175 fid.write(pack('q', recordlength)) 172 176 except error as Err: 173 raise ValueError('Field {} can not be marshaled, {}, with "number" the leng htof the record.'.format(name, Err))177 raise ValueError('Field {} can not be marshaled, {}, with "number" the length of the record.'.format(name, Err)) 174 178 175 179 #write data code and matrix type: 176 180 fid.write(pack('i', FormatToCode(datatype)))  - 
      
../trunk-jpl/src/m/solve/parseresultsfromdisk.py
17 17 try: 18 18 fid = open(filename, 'rb') 19 19 except IOError as e: 20 raise IOError(" loadresultsfromdisk error message: could not open '{}' for binary reading.".format(filename))20 raise IOError("parseresultsfromdisk error message: could not open {} for binary reading".format(filename)) 21 21 22 22 #initialize results: 23 23 saveres = [] … … 242 242 nlayer = md.materials.numlayers 243 243 degmax = md.love.sh_nmax 244 244 nfreq = md.love.nfreq 245 #for numpy 1.8 + only 246 #temp_field = np.full((degmax + 1, nfreq, nlayer + 1, 6), 0.0) 245 r0 = md.love.r0 246 g0 = md.love.g0 247 mu0 = md.love.mu0 248 rr = md.materials.radius 249 rho = md.materials.density 250 rho_avg = (rho * np.diff(np.power(rr, 3), n=1, axis=0)) / np.diff(np.power(rr, 3).sum()).sum() 247 251 temp_field = np.empty((degmax + 1, nfreq, nlayer + 1, 6)) 248 252 temp_field.fill(0.0) 249 253 for ii in range(degmax + 1): 250 254 for jj in range(nfreq): 251 255 for kk in range(nlayer + 1): 252 ll = ii * (nlayer + 1) * 6 + (kk * 6 + 1) 253 for mm in range(6): 254 temp_field[ii, jj, kk, mm] = field[ll + mm - 1, jj] 256 if kk < nlayer + 1: 257 ll = ii * (nlayer + 1) * 6 + (kk * 6 + 1) + 3 258 temp_field[ii, jj, kk, 0] = field[ll + (1 - 1), jj] * r0 # mm = 4 259 temp_field[ii, jj, kk, 1] = field[ll + (2 - 1), jj] * mu0 # mm = 5 260 temp_field[ii, jj, kk, 2] = field[ll + (3 - 1), jj] * r0 # mm = 6 261 temp_field[ii, jj, kk, 3] = field[ll + (4 - 1), jj] * mu0 # mm = 1 262 temp_field[ii, jj, kk, 4] = field[ll + (5 - 1), jj] * r0 * g0 # mm = 2 263 temp_field[ii, jj, kk, 5] = field[ll + (6 - 1), jj] * g0 # mm = 3 264 print(temp_field) 265 else: # surface 266 ll = ii * (nlayer + 1) * 6 - 2 267 temp_field[ii, jj, kk, 0] = field[ll + (1 - 1), jj] * r0 268 temp_field[ii, jj, kk, 2] = field[ll + (2 - 1), jj] * r0 269 temp_field[ii, jj, kk, 4] = field[ll + (3 - 1), jj] * r0 * g0 270 # surface BC 271 temp_field[ii, jj, kk, 3] = 0 272 if md.love.forcing_type == 9: 273 temp_field[ii, jj, kk, 1] = 0 274 temp_field[ii, 
jj, kk, 5] = (2 * ii - 1) / r0 - ii * field[ll + (3 - 1), jj] * g0 275 elif md.love.forcing_type == 11: 276 temp_field[ii, jj, kk, 1] = -(2 * (ii - 1) + 1) * rho_avg / 3 277 temp_field[ii, jj, kk, 5] = (2 * ii - 1) / r0 - ii * field[ll + (3 - 1), jj] * g0 255 278 field = temp_field 256 279 257 280 if time != -9999:  - 
      
../trunk-jpl/src/m/solve/parseresultsfromdisk.m
270 270 end 271 271 result.step=step; 272 272 result.field=field; 273 274 273 end 275 274 % }}} 276 275 function result=ReadDataDimensions(fid) % {{{  - 
      
../trunk-jpl/src/m/solve/marshall.py
5 5 ''' 6 6 MARSHALL - outputs a compatible binary file from @model md, for certain solution type. 7 7 8 The routine creates a compatible binary file from @model md9 This binary file will be used for parallel runs in JPL -package8 The routine creates a compatible binary file from @model md 9 This binary file will be used for parallel runs in JPL-package 10 10 11 Usage:12 marshall(md)11 Usage: 12 marshall(md) 13 13 ''' 14 14 if md.verbose.solution: 15 15 print("marshalling file {}.bin".format(md.miscellaneous.name))  - 
      
../trunk-jpl/src/m/classes/geometry.m
89 89 90 90 end % }}} 91 91 function marshall(self,prefix,md,fid) % {{{ 92 disp(md.geometry.thickness) 92 93 WriteData(fid,prefix,'object',self,'fieldname','surface','format','DoubleMat','mattype',1); 93 94 WriteData(fid,prefix,'object',self,'fieldname','thickness','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts); 95 disp(md.geometry.thickness) 96 pause 94 97 WriteData(fid,prefix,'object',self,'fieldname','base','format','DoubleMat','mattype',1); 95 98 WriteData(fid,prefix,'object',self,'fieldname','bed','format','DoubleMat','mattype',1); 96 99 WriteData(fid,prefix,'object',self,'fieldname','hydrostatic_ratio','format','DoubleMat','mattype',1);  - 
      
../trunk-jpl/test/NightlyRun/test2052.m
8 8 md.gia.cross_section_shape=1; % for square-edged x-section 9 9 10 10 % evaluation time (termed start_time) 11 md.timestepping.start_time=0.3; % for t \approx 0 kyr : to get eleastic response!11 md.timestepping.start_time=0.3; % for t \approx 0 kyr: to get elastic response! 12 12 md.timestepping.final_time=2500000; % 2,500 kyr 13 13 14 14 %% define loading history {{{ … … 22 22 23 23 % find out elements that have zero loads throughout the loading history. 24 24 pos = find(sum(abs(md.geometry.thickness(1:end-1,:)),2)==0); 25 md.mask.ice_levelset(pos)=1; % no -ice.25 md.mask.ice_levelset(pos)=1; % no ice 26 26 27 27 md.cluster=generic('name',oshostname(),'np',3); 28 28 md.verbose=verbose('1111111'); … … 30 30 %% solve for GIA deflection 31 31 md=solve(md,'Gia'); 32 32 33 pause 34 33 35 %Test Name: GiaIvinsBenchmarksAB2dC1 34 36 U_AB2dC1 = md.results.GiaSolution.UGia; 35 URate_AB2dC1 = md.results.GiaSolution.UGiaRate; 37 URate_AB2dC1 = md.results.GiaSolution.UGiaRate; 36 38 37 39 %Test Name: GiaIvinsBenchmarksAB2dC2 38 40 %% different evaluation time. {{{  
  Note:
 See   TracBrowser
 for help on using the repository browser.
    ![(please configure the [header_logo] section in trac.ini)](/trac/issm/chrome/common/trac_banner.png)