Index: ../trunk-jpl/test/NightlyRun/test2005.m =================================================================== --- ../trunk-jpl/test/NightlyRun/test2005.m (revision 25687) +++ ../trunk-jpl/test/NightlyRun/test2005.m (revision 25688) @@ -39,7 +39,7 @@ md.mask.ocean_levelset(md.mesh.elements(pos,:))=1; %make sure wherever there is an ice load, that the mask is set to ice: -pos=find(md.solidearth.surfaceload.icethicknesschange); +%pos=find(md.solidearth.surfaceload.icethicknesschange); % TODO: Do we need to do this twice? md.mask.ice_levelset(md.mesh.elements(pos,:))=-1; % }}} @@ -51,7 +51,6 @@ %Materials: md.materials=materials('hydro'); - %Miscellaneous md.miscellaneous.name='test2005'; @@ -85,7 +84,6 @@ end deltathickness(end,:)=0:1:9; md.solidearth.surfaceload.icethicknesschange=deltathickness; - %hack: md.geometry.surface=zeros(md.mesh.numberofvertices,1); md.geometry.thickness=ones(md.mesh.numberofvertices,1); @@ -102,6 +100,6 @@ %Fields and tolerances to track changes field_names={'Sealevel1','Sealevel5','Sealevel10','Seustatic10'}; -field_tolerances={1e-13,1e-13,1e-13,1e-13,1e-13}; +field_tolerances={1e-13,1e-13,1e-13,1e-13}; field_values={S1,S5,S10,Seus10}; Index: ../trunk-jpl/test/NightlyRun/runme.py =================================================================== --- ../trunk-jpl/test/NightlyRun/runme.py (revision 25687) +++ ../trunk-jpl/test/NightlyRun/runme.py (revision 25688) @@ -113,7 +113,7 @@ # }}} #Process Ids according to benchmarks {{{ if benchmark == 'nightly': - test_ids = test_ids.intersection(set(range(1, 1000))) + test_ids = test_ids.intersection(set(range(1, 1000)).union(set(range(2001, 2500)))) elif benchmark == 'validation': test_ids = test_ids.intersection(set(range(1001, 2000))) elif benchmark == 'ismip': Index: ../trunk-jpl/test/NightlyRun/test2005.py =================================================================== --- ../trunk-jpl/test/NightlyRun/test2005.py (nonexistent) +++ ../trunk-jpl/test/NightlyRun/test2005.py (revision 25688) @@ -0,0 +1,116 @@ +#Test Name: EarthSlr +import numpy as np + +from gmshplanet import * +from gmtmask import * +from lovenumbers import * +from materials import * +from model import * +from parameterize import * +from paterson import * +from solve import * + + +# Mesh earth +md = model() +md.mesh = gmshplanet('radius', 6.371012 * 1e3, 'resolution', 700.) 
#700 km resolution mesh + +# Parameterize solidearth solution +# Solidearth loading #{{{ +md.solidearth.surfaceload.icethicknesschange = np.zeros((md.mesh.numberofelements, 1)) +md.solidearth.sealevel = np.zeros((md.mesh.numberofvertices, 1)) +md.dsl.global_average_thermosteric_sea_level_change = np.zeros((2, 1)) +md.dsl.sea_surface_height_change_above_geoid = np.zeros((md.mesh.numberofvertices + 1, 1)) +md.dsl.sea_water_pressure_change_at_sea_floor = np.zeros((md.mesh.numberofvertices + 1, 1)) + +# Antarctica +late = md.mesh.lat[md.mesh.elements - 1].sum(axis=1) / 3 +longe = md.mesh.long[md.mesh.elements - 1].sum(axis=1) / 3 +pos = np.where(late < -80)[0] +md.solidearth.surfaceload.icethicknesschange[pos] = -100 +# Greenland +pos = np.where(np.logical_and.reduce((late > 70, late < 80, longe > -60, longe < -30)))[0] +md.solidearth.surfaceload.icethicknesschange[pos] = -100 + +# Elastic loading from love numbers +md.solidearth.lovenumbers = lovenumbers('maxdeg', 100) +#}}} + +# Mask #{{{ +mask = gmtmask(md.mesh.lat, md.mesh.long) +icemask = np.ones((md.mesh.numberofvertices, 1)) +pos = np.where(mask == 0)[0] +icemask[pos] = -1 +pos = np.where(mask[md.mesh.elements - 1].sum(axis=1) < 3)[0] +icemask[md.mesh.elements[pos, :] - 1] = -1 +md.mask.ice_levelset = icemask +md.mask.ocean_levelset = -icemask + +# Make sure that the elements that have loads are fully grounded +pos = np.nonzero(md.solidearth.surfaceload.icethicknesschange)[0] +md.mask.ocean_levelset[md.mesh.elements[pos, :] - 1] = 1 + +# Make sure wherever there is an ice load, that the mask is set to ice: +#pos = np.nonzero(md.solidearth.surfaceload.icethicknesschange)[0] # TODO: Do we need to do this twice? +md.mask.ice_levelset[md.mesh.elements[pos, :] - 1] = -1 +# }}} + +md.solidearth.settings.ocean_area_scaling = 0 + +# Geometry for the bed; arbitrary +md.geometry.bed = -np.ones((md.mesh.numberofvertices, 1)) + +# Materials +md.materials = materials('hydro') + +# Miscellaneous +md.miscellaneous.name = 'test2005' + +# Solution parameters +md.solidearth.settings.reltol = np.nan +md.solidearth.settings.abstol = 1e-3 +md.solidearth.settings.computesealevelchange = 1 + +# Max number of iterations reverted back to 10 (i.e. 
the original default value) +md.solidearth.settings.maxiter = 10 + +# Eustatic + rigid + elastic + rotation run +md.solidearth.settings.rigid = 1 +md.solidearth.settings.elastic = 1 +md.solidearth.settings.rotation = 1 + +# Transient settings +md.timestepping.start_time = 0 +md.timestepping.final_time = 10 +md.timestepping.time_step = 1 +md.transient.isslr = 1 +md.transient.issmb = 0 +md.transient.isgia = 1 +md.transient.ismasstransport = 0 +md.transient.isstressbalance = 0 +md.transient.isthermal = 0 +dh = np.asarray(md.solidearth.surfaceload.icethicknesschange).T +deltathickness = np.zeros((md.mesh.numberofelements + 1, 10)) +for i in range(10): + deltathickness[0:-1, i] = dh * (i + 1) +deltathickness[-1, :] = np.arange(0, 10, 1) +md.solidearth.surfaceload.icethicknesschange = deltathickness + +# Hack +md.geometry.surface = np.zeros((md.mesh.numberofvertices, 1)) +md.geometry.thickness = np.ones((md.mesh.numberofvertices, 1)) +md.geometry.base = -np.ones((md.mesh.numberofvertices, 1)) +md.geometry.bed = md.geometry.base + +# Run transient solution +md = solve(md, 'Transient') + +S1 = md.results.TransientSolution[1 - 1].Sealevel +S5 = md.results.TransientSolution[5 - 1].Sealevel +S10 = md.results.TransientSolution[10 - 1].Sealevel +Seus10 = md.results.TransientSolution[10 - 1].Bslr + +#Fields and tolerances to track changes +field_names = ['Sealevel1', 'Sealevel5', 'Sealevel10', 'Seustatic10'] +field_tolerances = [1e-13, 1e-13, 1e-13, 1e-13] +field_values = [S1, S5, S10, Seus10] Index: ../trunk-jpl/test/NightlyRun/test2002.py =================================================================== --- ../trunk-jpl/test/NightlyRun/test2002.py (revision 25687) +++ ../trunk-jpl/test/NightlyRun/test2002.py (revision 25688) @@ -51,7 +51,7 @@ md.mask.ocean_levelset[md.mesh.elements[pos, :] - 1] = 1 #make sure wherever there is an ice load, that the mask is set to ice: -#pos = np.nonzero(md.solidearth.surfaceload.icethicknesschange)[0] # Do we need to do this twice? +#pos = np.nonzero(md.solidearth.surfaceload.icethicknesschange)[0] # TODO: Do we need to do this twice? 
md.mask.ice_levelset[md.mesh.elements[pos, :] - 1] = -1 # }}} @@ -71,7 +71,7 @@ md.solidearth.settings.abstol = 1e-3 md.solidearth.settings.computesealevelchange = 1 -#max number of iteration reverted back to 10 (i.e., the original default value) +#max number of iterations reverted back to 10 (i.e., the original default value) md.solidearth.settings.maxiter = 10 #eustatic run: Index: ../trunk-jpl/src/m/classes/SMBpddSicopolis.py =================================================================== --- ../trunk-jpl/src/m/classes/SMBpddSicopolis.py (revision 25687) +++ ../trunk-jpl/src/m/classes/SMBpddSicopolis.py (revision 25688) @@ -1,29 +1,29 @@ import numpy as np + +from checkfield import checkfield from fielddisplay import fielddisplay -from checkfield import checkfield +from helpers import * +from MatlabFuncs import * +from project3d import project3d from WriteData import WriteData -from project3d import project3d -from MatlabFuncs import * -from helpers import * class SMBpddSicopolis(object): - """ - SMBpddSicopolis Class definition + """SMBPDDSICOPOLIS class definition Usage: SMBpddSicopolis = SMBpddSicopolis() -""" + """ - def __init__(self): # {{{ - self.precipitation = float('NaN') - self.monthlytemperatures = float('NaN') - self.temperature_anomaly = float('NaN') - self.precipitation_anomaly = float('NaN') - self.smb_corr = float('NaN') + def __init__(self, *args): # {{{ + self.precipitation = np.nan + self.monthlytemperatures = np.nan + self.temperature_anomaly = np.nan + self.precipitation_anomaly = np.nan + self.smb_corr = np.nan self.desfac = 0 - self.s0p = float('NaN') - self.s0t = float('NaN') + self.s0p = np.nan + self.s0t = np.nan self.rlaps = 0 self.isfirnwarming = 0 self.steps_per_step = 1 @@ -30,42 +30,35 @@ self.averaging = 0 self.requested_outputs = [] - self.setdefaultparameters() + nargs = len(args) + if nargs == 0: + self.setdefaultparameters() + else: + raise Exception('constructor not supported') # }}} def __repr__(self): # {{{ - string = ' surface forcings parameters:' - string += '\n SICOPOLIS PDD scheme (Calov & Greve, 2005) :' - - string = "%s\n%s" % (string, fielddisplay(self, 'monthlytemperatures', 'monthly surface temperatures [K]')) - string = "%s\n%s" % (string, fielddisplay(self, 'precipitation', 'monthly surface precipitation [m/yr water eq]')) - string = "%s\n%s" % (string, fielddisplay(self, 'temperature_anomaly', 'anomaly to monthly reference temperature (additive [K])')) - string = "%s\n%s" % (string, fielddisplay(self, 'precipitation_anomaly', 'anomaly to monthly precipitation (multiplicative, e.g. 
q = q0*exp(0.070458*DeltaT) after Huybrechts (2002)) [no unit])')) - string = "%s\n%s" % (string, fielddisplay(self, 'smb_corr', 'correction of smb after PDD call [m/a]')) - string = "%s\n%s" % (string, fielddisplay(self, 's0p', 'should be set to elevation from precip source (between 0 and a few 1000s m, default is 0) [m]')) - string = "%s\n%s" % (string, fielddisplay(self, 's0t', 'should be set to elevation from temperature source (between 0 and a few 1000s m, default is 0) [m]')) - string = "%s\n%s" % (string, fielddisplay(self, 'rlaps', 'present day lapse rate (default is 7.4 degree/km)')) - string = "%s\n%s" % (string, fielddisplay(self, 'desfac', 'desertification elevation factor (default is -log(2.0)/1000)')) - string = "%s\n%s" % (string, fielddisplay(self, 'isfirnwarming', 'is firnwarming (Reeh 1991) activated (0 or 1, default is 1)')) - string = "%s\n%s" % (string, fielddisplay(self, 'steps_per_step', 'number of smb steps per time step')) - string = "%s\n%s" % (string, fielddisplay(self, 'averaging', 'averaging methods from short to long steps')) - string = "%s\n\t\t%s" % (string, '0: Arithmetic (default)') - string = "%s\n\t\t%s" % (string, '1: Geometric') - string = "%s\n\t\t%s" % (string, '2: Harmonic') - - string = "%s\n%s" % (string, fielddisplay(self, 'requested_outputs', 'additional outputs requested (TemperaturePDD, SmbAccumulation, SmbMelt)')) + s = ' surface forcings parameters:\n' + s += ' SICOPOLIS PDD scheme (Calov & Greve, 2005):\n' + s += '{}\n'.format(fielddisplay(self, 'monthlytemperatures', 'monthly surface temperatures [K]')) + s += '{}\n'.format(fielddisplay(self, 'precipitation', 'monthly surface precipitation [m/yr water eq]')) + s += '{}\n'.format(fielddisplay(self, 'temperature_anomaly', 'anomaly to monthly reference temperature (additive [K])')) + s += '{}\n'.format(fielddisplay(self, 'precipitation_anomaly', 'anomaly to monthly precipitation (multiplicative, e.g. 
q = q0*exp(0.070458*DeltaT) after Huybrechts (2002)) [no unit])')) + s += '{}\n'.format(fielddisplay(self, 'smb_corr', 'correction of smb after PDD call [m/a]')) + s += '{}\n'.format(fielddisplay(self, 's0p', 'should be set to elevation from precip source (between 0 and a few 1000s m, default is 0) [m]')) + s += '{}\n'.format(fielddisplay(self, 's0t', 'should be set to elevation from temperature source (between 0 and a few 1000s m, default is 0) [m]')) + s += '{}\n'.format(fielddisplay(self, 'rlaps', 'present day lapse rate (default is 7.4 degree/km)')) + s += '{}\n'.format(fielddisplay(self, 'desfac', 'desertification elevation factor (default is -log(2.0)/1000)')) + s += '{}\n'.format(fielddisplay(self, 'isfirnwarming', 'is firnwarming (Reeh 1991) activated (0 or 1, default is 1)')) + s += '{}\n'.format(fielddisplay(self, 'steps_per_step', 'number of smb steps per time step')) + s += '{}\n'.format(fielddisplay(self, 'averaging', 'averaging methods from short to long steps')) + s += '\t\t{}\n'.format('0: Arithmetic (default)') + s += '\t\t{}\n'.format('1: Geometric') + s += '\t\t{}\n'.format('2: Harmonic') + s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested (TemperaturePDD, SmbAccumulation, SmbMelt)')) + return s # }}} - @staticmethod - def SMBpddSicopolis(*args): # {{{ - nargin = len(args) - - if nargin == 0: - return SMBpddSicopolis() - else: - raise RuntimeError('SMBpddSicopolis: constructor not supported') - # }}} - def extrude(self, md): # {{{ self.precipitation = project3d(md, 'vector', self.precipitation, 'type', 'node') self.monthlytemperatures = project3d(md, 'vector', self.monthlytemperatures, 'type', 'node') @@ -82,7 +75,6 @@ # }}} def initialize(self, md): # {{{ - if np.isnan(self.s0p): self.s0p = np.zeros((md.mesh.numberofvertices, )) print(' no SMBpddSicopolis.s0p specified: values set as zero') @@ -102,6 +94,7 @@ if np.isnan(self.smb_corr): self.smb_corr = np.zeros((md.mesh.numberofvertices, )) print(' no SMBpddSicopolis.smb_corr specified: values set as zero') + return self # }}} def setdefaultparameters(self): # {{{ @@ -108,13 +101,12 @@ self.isfirnwarming = 1 self.desfac = -np.log(2.0) / 1000 self.rlaps = 7.4 - + return self # }}} def checkconsistency(self, md, solution, analyses): # {{{ - if (strcmp(solution, 'TransientSolution') and md.transient.issmb == 0): + if solution == 'TransientSolution' and not md.transient.issmb: return - if 'MasstransportAnalysis' in analyses: md = checkfield(md, 'fieldname', 'smb.desfac', '<=', 1, 'numel', 1) md = checkfield(md, 'fieldname', 'smb.s0p', '>=', 0, 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices, 1]) @@ -122,26 +114,20 @@ md = checkfield(md, 'fieldname', 'smb.rlaps', '>=', 0, 'numel', 1) md = checkfield(md, 'fieldname', 'smb.monthlytemperatures', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices, 12]) md = checkfield(md, 'fieldname', 'smb.precipitation', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices, 12]) - md = checkfield(md, 'fieldname', 'smb.steps_per_step', '>=', 1, 'numel', [1]) md = checkfield(md, 'fieldname', 'smb.averaging', 'numel', [1], 'values', [0, 1, 2]) md = checkfield(md, 'fieldname', 'smb.requested_outputs', 'stringrow', 1) - return md # }}} def marshall(self, prefix, md, fid): # {{{ - yts = md.constants.yts - WriteData(fid, prefix, 'name', 'md.smb.model', 'data', 10, 'format', 'Integer') - WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'isfirnwarming', 'format', 'Boolean') WriteData(fid, prefix, 'object', self, 'class', 'smb', 
'fieldname', 'desfac', 'format', 'Double') WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 's0p', 'format', 'DoubleMat', 'mattype', 1) WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 's0t', 'format', 'DoubleMat', 'mattype', 1) WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'rlaps', 'format', 'Double') - WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'monthlytemperatures', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', md.constants.yts) WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'precipitation', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1. / yts, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts) WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'temperature_anomaly', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts) @@ -158,5 +144,4 @@ outputs = [outputs, defaultoutputs(self, md)] #add defaults WriteData(fid, prefix, 'data', outputs, 'name', 'md.smb.requested_outputs', 'format', 'StringArray') - # }}} Index: ../trunk-jpl/src/m/classes/giaivins.py =================================================================== --- ../trunk-jpl/src/m/classes/giaivins.py (revision 25687) +++ ../trunk-jpl/src/m/classes/giaivins.py (revision 25688) @@ -7,11 +7,10 @@ class giaivins(object): - """ - GIA class definition for Ivins and James model + """GIA class definition for Ivins and James model - Usage: - giaivins = giaivins() + Usage: + giaivins = giaivins() """ def __init__(self, *args): #{{{ @@ -20,7 +19,6 @@ self.cross_section_shape = 0 nargin = len(args) - if nargin == 0: self.setdefaultparameters() else: @@ -29,15 +27,15 @@ def __repr__(self): #{{{ s = ' giaivins parameters:\n' - s += "{}\n".format(fielddisplay(self, 'mantle_viscosity', 'mantle viscosity [Pa s]')) - s += "{}\n".format(fielddisplay(self, 'lithosphere_thickness', 'lithosphere thickness (km)')) - s += "{}\n".format(fielddisplay(self, 'cross_section_shape', "1: square-edged (default). 2: elliptical. See iedge in GiaDeflectionCore")) - + s += '{}\n'.format(fielddisplay(self, 'mantle_viscosity', 'mantle viscosity [Pa s]')) + s += '{}\n'.format(fielddisplay(self, 'lithosphere_thickness', 'lithosphere thickness (km)')) + s += '{}\n'.format(fielddisplay(self, 'cross_section_shape', "1: square-edged (default). 2: elliptical. See iedge in GiaDeflectionCore")) return s #}}} def setdefaultparameters(self): #{{{ self.cross_section_shape = 1 + return self #}}} def checkconsistency(self, md, solution, analyses): #{{{ @@ -48,9 +46,15 @@ md = checkfield(md, 'fieldname', 'gia.lithosphere_thickness', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices], '>', 0) md = checkfield(md, 'fieldname', 'gia.cross_section_shape', 'numel', [1], 'values', [1, 2]) - #be sure that if we are running a masstransport ice flow model coupled with giaivins, that thickness forcings - #are not provided into the future. - + # Be sure that if we are running a masstransport ice flow model coupled + # with giaivins, that thickness forcings are not provided into the + # future. 
+ if solution == 'TransientSolution' and md.transient.ismasstransport and md.transient.isgia: + # Figure out if thickness is a transient forcing + if md.geometry.thickness.shape[0] == md.mesh.numberofvertices + 1: + # Recover the furthest time "in time" + if md.geometry.thickness[-1, -1] != md.timestepping.start_time: + md.checkmessage('if masstransport is on, transient thickness forcing for the giaivins model should not be provided in the future. Synchronize your start_time to correspond to the most recent transient thickness forcing timestep') return md #}}} Index: ../trunk-jpl/src/m/classes/rotational.py =================================================================== --- ../trunk-jpl/src/m/classes/rotational.py (revision 25687) +++ ../trunk-jpl/src/m/classes/rotational.py (revision 25688) @@ -6,12 +6,11 @@ class rotational(object): - ''' - ROTATIONAL class definition + """ROTATIONAL class definition - Usage: - rotational = rotational() - ''' + Usage: + rotational = rotational() + """ def __init__(self, *args): #{{{ self.equatorialmoi = 0 @@ -19,7 +18,6 @@ self.langularvelocity = 0 nargin = len(args) - if nargin == 0: self.setdefaultparameters() else: @@ -31,27 +29,25 @@ s += '{}\n'.format(fielddisplay(self, 'equatorialmoi', 'mean equatorial moment of inertia [kg m^2]')) s += '{}\n'.format(fielddisplay(self, 'polarmoi', 'polar moment of inertia [kg m^2]')) s += '{}\n'.format(fielddisplay(self, 'angularvelocity', 'mean rotational velocity of earth [per second]')) - return s #}}} def setdefaultparameters(self): # {{{ - #moment of inertia + # Moment of inertia self.equatorialmoi = 8.0077e37 # [kg m^2] self.polarmoi = 8.0345e37 # [kg m^2] - #mean rotational velocity of earth + # Mean rotational velocity of earth self.angularvelocity = 7.2921e-5 # [s^-1] + return self #}}} def checkconsistency(self, md, solution, analyses): # {{{ - if ('SealevelriseAnalysis' not in analyses) or (solution == 'TransientSolution' and md.transient.isslr == 0): + if ('SealevelriseAnalysis' not in analyses) or (solution == 'TransientSolution' and not md.transient.isslr): return md - md = checkfield(md, 'fieldname', 'solidearth.rotational.equatorialmoi', 'NaN', 1, 'Inf', 1) md = checkfield(md, 'fieldname', 'solidearth.rotational.polarmoi', 'NaN', 1, 'Inf', 1) md = checkfield(md, 'fieldname', 'solidearth.rotational.angularvelocity', 'NaN', 1, 'Inf', 1) - return md #}}} Index: ../trunk-jpl/src/m/classes/SMBforcing.m =================================================================== --- ../trunk-jpl/src/m/classes/SMBforcing.m (revision 25687) +++ ../trunk-jpl/src/m/classes/SMBforcing.m (revision 25688) @@ -31,14 +31,10 @@ end end % }}} function list = defaultoutputs(self,md) % {{{ - list = {''}; - end % }}} function self = extrude(self,md) % {{{ - self.mass_balance=project3d(md,'vector',self.mass_balance,'type','node'); - end % }}} function self = initialize(self,md) % {{{ @@ -49,9 +45,7 @@ end % }}} function md = checkconsistency(self,md,solution,analyses) % {{{ - if (strcmp(solution,'TransientSolution') & md.transient.issmb == 0), return; end - if ismember('MasstransportAnalysis',analyses), md = checkfield(md,'fieldname','smb.mass_balance','timeseries',1,'NaN',1,'Inf',1); end Index: ../trunk-jpl/src/m/classes/nodalvalue.m =================================================================== --- ../trunk-jpl/src/m/classes/nodalvalue.m (revision 25687) +++ ../trunk-jpl/src/m/classes/nodalvalue.m (revision 25688) @@ -59,10 +59,10 @@ end % }}} function md = marshall(self,prefix,md,fid) % {{{ - 
WriteData(fid,prefix,'data',self.name,'name','md.nodalvalue.name','format','String'); - WriteData(fid,prefix,'data',self.definitionstring,'name','md.nodalvalue.definitionenum','format','String'); - WriteData(fid,prefix,'data',self.model_string,'name','md.nodalvalue.model_enum','format','String'); - WriteData(fid,prefix,'data',self.node,'name','md.nodalvalue.node','format','Integer'); + WriteData(fid,prefix,'data',self.name,'name','md.nodalvalue.name','format','String'); + WriteData(fid,prefix,'data',self.definitionstring,'name','md.nodalvalue.definitionenum','format','String'); + WriteData(fid,prefix,'data',self.model_string,'name','md.nodalvalue.model_enum','format','String'); + WriteData(fid,prefix,'data',self.node,'name','md.nodalvalue.node','format','Integer'); end % }}} end Index: ../trunk-jpl/src/m/classes/dsl.py =================================================================== --- ../trunk-jpl/src/m/classes/dsl.py (revision 25687) +++ ../trunk-jpl/src/m/classes/dsl.py (revision 25688) @@ -7,12 +7,11 @@ class dsl(object): - ''' - DSL - slass definition + """DSL - slass definition - Usage: - dsl = dsl() - ''' + Usage: + dsl = dsl() + """ def __init__(self): #{{{ self.global_average_thermosteric_sea_level_change = 0 #corresponds to zostoga field in CMIP5 archives. Specified as a temporally variable global rate (mm/yr) @@ -22,12 +21,11 @@ #}}} def __repr__(self): #{{{ - s = " dsl parameters:" - s = "%s\n%s" % (s, fielddisplay(self, 'global_average_thermosteric_sea_level_change', 'corresponds to zostoga field in CMIP5 archives. Specified as a temporally variable global rate (mm/yr)')) - s = "%s\n%s" % (s, fielddisplay(self, 'sea_surface_height_change_above_geoid', 'corresponds to zos field in CMIP5 archives. Spatial average is 0. Specified as a spatio-temporally variable rate (mm/yr)')) - s = "%s\n%s" % (s, fielddisplay(self, 'sea_water_pressure_change_at_sea_floor', 'corresponds to bpo field in CMIP5 archives. Specified as a spatio-temporally variable rate (in Pa/yr)')) - s = "%s\n%s" % (s, fielddisplay(self, 'compute_fingerprints', 'do we use the sea water pressure change to compute fingerprints and correct sea_surface_height_change_above_geoid')) - + s = ' dsl parameters:\n' + s += '{}\n'.format(fielddisplay(self, 'global_average_thermosteric_sea_level_change', 'corresponds to zostoga field in CMIP5 archives. Specified as a temporally variable global rate (mm/yr)')) + s += '{}\n'.format(fielddisplay(self, 'sea_surface_height_change_above_geoid', 'corresponds to zos field in CMIP5 archives. Spatial average is 0. Specified as a spatio-temporally variable rate (mm/yr)')) + s += '{}\n'.format(fielddisplay(self, 'sea_water_pressure_change_at_sea_floor', 'corresponds to bpo field in CMIP5 archives. 
Specified as a spatio-temporally variable rate (in Pa/yr)')) + s += '{}\n'.format(fielddisplay(self, 'compute_fingerprints', 'do we use the sea water pressure change to compute fingerprints and correct sea_surface_height_change_above_geoid')) return s #}}} @@ -42,21 +40,18 @@ #}}} def checkconsistency(self, md, solution, analyses): #{{{ - # Early retun + # Early return if 'SealevelriseAnalysis' not in analyses: return md - if solution == 'TransientSolution' and md.transient.isslr == 0: return md - md = checkfield(md, 'fieldname', 'dsl.global_average_thermosteric_sea_level_change', 'NaN', 1, 'Inf', 1) md = checkfield(md, 'fieldname', 'dsl.sea_surface_height_change_above_geoid', 'NaN', 1, 'Inf', 1, 'timeseries', 1) md = checkfield(md, 'fieldname', 'dsl.sea_water_pressure_change_at_sea_floor', 'NaN', 1, 'Inf', 1, 'timeseries', 1) md = checkfield(md, 'fieldname', 'dsl.compute_fingerprints', 'NaN', 1, 'Inf', 1, 'values', [0, 1]) - if self.compute_fingerprints: - #check geodetic flag of slr is on: - if md.slr.geodetic == 0: + # Check if geodetic flag of slr is on + if not md.slr.geodetic: raise RuntimeError('DSL checkconsistency error message: if bottom pressure fingerprints computations are requested, slr class should have geodetic flag on') return md # }}} Index: ../trunk-jpl/src/m/classes/stressbalance.py =================================================================== --- ../trunk-jpl/src/m/classes/stressbalance.py (revision 25687) +++ ../trunk-jpl/src/m/classes/stressbalance.py (revision 25688) @@ -1,18 +1,19 @@ +import sys + import numpy as np -import sys + +from checkfield import checkfield +from fielddisplay import fielddisplay +import MatlabFuncs as m from project3d import project3d -from fielddisplay import fielddisplay -from checkfield import checkfield from WriteData import WriteData -import MatlabFuncs as m class stressbalance(object): - """ - STRESSBALANCE class definition + """STRESSBALANCE class definition - Usage: - stressbalance = stressbalance() + Usage: + stressbalance = stressbalance() """ def __init__(self): # {{{ @@ -40,37 +41,31 @@ #}}} def __repr__(self): # {{{ - - string = ' StressBalance solution parameters:' - string = "%s\n%s" % (string, ' Convergence criteria:') - string = "%s\n%s" % (string, fielddisplay(self, 'restol', 'mechanical equilibrium residual convergence criterion')) - string = "%s\n%s" % (string, fielddisplay(self, 'reltol', 'velocity relative convergence criterion, NaN: not applied')) - string = "%s\n%s" % (string, fielddisplay(self, 'abstol', 'velocity absolute convergence criterion, NaN: not applied')) - string = "%s\n%s" % (string, fielddisplay(self, 'isnewton', "0: Picard's fixed point, 1: Newton's method, 2: hybrid")) - string = "%s\n%s" % (string, fielddisplay(self, 'maxiter', 'maximum number of nonlinear iterations')) - - string = "%s\n%s" % (string, '\n boundary conditions:') - string = "%s\n%s" % (string, fielddisplay(self, 'spcvx', 'x - axis velocity constraint (NaN means no constraint) [m / yr]')) - string = "%s\n%s" % (string, fielddisplay(self, 'spcvy', 'y - axis velocity constraint (NaN means no constraint) [m / yr]')) - string = "%s\n%s" % (string, fielddisplay(self, 'spcvz', 'z - axis velocity constraint (NaN means no constraint) [m / yr]')) - string = "%s\n%s" % (string, fielddisplay(self, 'icefront', 'segments on ice front list (last column 0: Air, 1: Water, 2: Ice')) - - string = "%s\n%s" % (string, '\n Rift options:') - string = "%s\n%s" % (string, fielddisplay(self, 'rift_penalty_threshold', 'threshold for instability of 
mechanical constraints')) - string = "%s\n%s" % (string, fielddisplay(self, 'rift_penalty_lock', 'number of iterations before rift penalties are locked')) - - string = "%s\n%s" % (string, '\n Penalty options:') - string = "%s\n%s" % (string, fielddisplay(self, 'penalty_factor', 'offset used by penalties: penalty = Kmax * 1.0**offset')) - string = "%s\n%s" % (string, fielddisplay(self, 'vertex_pairing', 'pairs of vertices that are penalized')) - - string = "%s\n%s" % (string, '\n Other:') - string = "%s\n%s" % (string, fielddisplay(self, 'shelf_dampening', 'use dampening for floating ice ? Only for FS model')) - string = "%s\n%s" % (string, fielddisplay(self, 'FSreconditioning', 'multiplier for incompressibility equation. Only for FS model')) - string = "%s\n%s" % (string, fielddisplay(self, 'referential', 'local referential')) - string = "%s\n%s" % (string, fielddisplay(self, 'loadingforce', 'loading force applied on each point [N / m^3]')) - string = "%s\n%s" % (string, fielddisplay(self, 'requested_outputs', 'additional outputs requested')) - - return string + s = ' StressBalance solution parameters:\n' + s += ' Convergence criteria:\n' + s += '{}\n'.format(fielddisplay(self, 'restol', 'mechanical equilibrium residual convergence criterion')) + s += '{}\n'.format(fielddisplay(self, 'reltol', 'velocity relative convergence criterion, NaN: not applied')) + s += '{}\n'.format(fielddisplay(self, 'abstol', 'velocity absolute convergence criterion, NaN: not applied')) + s += '{}\n'.format(fielddisplay(self, 'isnewton', "0: Picard's fixed point, 1: Newton's method, 2: hybrid")) + s += '{}\n'.format(fielddisplay(self, 'maxiter', 'maximum number of nonlinear iterations')) + s += ' boundary conditions:\n' + s += '{}\n'.format(fielddisplay(self, 'spcvx', 'x - axis velocity constraint (NaN means no constraint) [m / yr]')) + s += '{}\n'.format(fielddisplay(self, 'spcvy', 'y - axis velocity constraint (NaN means no constraint) [m / yr]')) + s += '{}\n'.format(fielddisplay(self, 'spcvz', 'z - axis velocity constraint (NaN means no constraint) [m / yr]')) + s += '{}\n'.format(fielddisplay(self, 'icefront', 'segments on ice front list (last column 0: Air, 1: Water, 2: Ice')) + s = "%s\n%s" % (s, '\n Rift options:') + s += '{}\n'.format(fielddisplay(self, 'rift_penalty_threshold', 'threshold for instability of mechanical constraints')) + s += '{}\n'.format(fielddisplay(self, 'rift_penalty_lock', 'number of iterations before rift penalties are locked')) + s += ' Penalty options:\n' + s += '{}\n'.format(fielddisplay(self, 'penalty_factor', 'offset used by penalties: penalty = Kmax * 1.0**offset')) + s += '{}\n'.format(fielddisplay(self, 'vertex_pairing', 'pairs of vertices that are penalized')) + s += ' Other:\n' + s += '{}\n'.format(fielddisplay(self, 'shelf_dampening', 'use dampening for floating ice ? Only for FS model')) + s += '{}\n'.format(fielddisplay(self, 'FSreconditioning', 'multiplier for incompressibility equation. 
Only for FS model')) + s += '{}\n'.format(fielddisplay(self, 'referential', 'local referential')) + s += '{}\n'.format(fielddisplay(self, 'loadingforce', 'loading force applied on each point [N / m^3]')) + s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested')) + return s #}}} def extrude(self, md): # {{{ @@ -102,7 +97,6 @@ self.rift_penalty_lock = 10 #output default: self.requested_outputs = ['default'] - return self #}}} @@ -112,13 +106,14 @@ else: list = ['Vx', 'Vy', 'Vel', 'Pressure'] return list - #}}} def checkconsistency(self, md, solution, analyses): # {{{ - #Early return + # Early return if 'StressbalanceAnalysis' not in analyses: return md + if solution == 'TransientSolution' and not md.transient.isstressbalance: + return md md = checkfield(md, 'fieldname', 'stressbalance.spcvx', 'Inf', 1, 'timeseries', 1) md = checkfield(md, 'fieldname', 'stressbalance.spcvy', 'Inf', 1, 'timeseries', 1) @@ -158,7 +153,6 @@ pos = np.nonzero(np.logical_and(md.mask.ocean_levelset, md.mesh.vertexonbase)) if np.any(np.logical_not(np.isnan(md.stressbalance.referential[pos, :]))): md.checkmessage("no referential should be specified for basal vertices of grounded ice") - return md # }}} @@ -182,13 +176,11 @@ WriteData(fid, prefix, 'object', self, 'class', 'stressbalance', 'fieldname', 'rift_penalty_lock', 'format', 'Integer') WriteData(fid, prefix, 'object', self, 'class', 'stressbalance', 'fieldname', 'rift_penalty_threshold', 'format', 'Integer') WriteData(fid, prefix, 'object', self, 'class', 'stressbalance', 'fieldname', 'referential', 'format', 'DoubleMat', 'mattype', 1) - if isinstance(self.loadingforce, (list, tuple, np.ndarray)) and np.size(self.loadingforce, 1) == 3: WriteData(fid, prefix, 'data', self.loadingforce[:, 0], 'format', 'DoubleMat', 'mattype', 1, 'name', 'md.stressbalance.loadingforcex') WriteData(fid, prefix, 'data', self.loadingforce[:, 1], 'format', 'DoubleMat', 'mattype', 1, 'name', 'md.stressbalance.loadingforcey') WriteData(fid, prefix, 'data', self.loadingforce[:, 2], 'format', 'DoubleMat', 'mattype', 1, 'name', 'md.stressbalance.loadingforcez') - - #process requested outputs + # Process requested outputs outputs = self.requested_outputs indices = [i for i, x in enumerate(outputs) if x == 'default'] if len(indices) > 0: Index: ../trunk-jpl/src/m/classes/frictionshakti.py =================================================================== --- ../trunk-jpl/src/m/classes/frictionshakti.py (revision 25687) +++ ../trunk-jpl/src/m/classes/frictionshakti.py (revision 25688) @@ -25,9 +25,9 @@ #}}} def __repr__(self): # {{{ - s = "Basal shear stress parameters: Sigma_b = coefficient^2 * Neff * u_b\n(effective stress Neff = rho_ice * g * thickness + rho_water * g * (head - b))\n" + s = 'Basal shear stress parameters: Sigma_b = coefficient^2 * Neff * u_b\n' + s += '(effective stress Neff = rho_ice * g * thickness + rho_water * g * (head - b))\n' s += "{}\n".format(fielddisplay(self, "coefficient", "friction coefficient [SI]")) - return s #}}} @@ -44,9 +44,7 @@ # Early return if 'StressbalanceAnalysis' not in analyses and 'ThermalAnalysis' not in analyses: return md - md = checkfield(md, 'fieldname', 'friction.coefficient', 'timeseries', 1, 'NaN', 1, 'Inf', 1) - return md # }}} Index: ../trunk-jpl/src/m/classes/flowequation.m =================================================================== --- ../trunk-jpl/src/m/classes/flowequation.m (revision 25687) +++ ../trunk-jpl/src/m/classes/flowequation.m (revision 25688) @@ -12,7 +12,7 @@ isHO = 0; isFS = 
0; isNitscheBC = 0; - FSNitscheGamma = 1e6; + FSNitscheGamma = 1e6; fe_SSA = ''; fe_HO = ''; fe_FS = ''; Index: ../trunk-jpl/src/m/classes/slr.py =================================================================== --- ../trunk-jpl/src/m/classes/slr.py (revision 25687) +++ ../trunk-jpl/src/m/classes/slr.py (revision 25688) @@ -49,79 +49,77 @@ #}}} def __repr__(self): # {{{ - s = ' slr parameters:' - s = "%s\n%s" % (s, fielddisplay(self, 'deltathickness', 'thickness change: ice height equivalent [m]')) - s = "%s\n%s" % (s, fielddisplay(self, 'sealevel', 'current sea level (prior to computation) [m]')) - s = "%s\n%s" % (s, fielddisplay(self, 'spcthickness', 'thickness constraints (NaN means no constraint) [m]')) - s = "%s\n%s" % (s, fielddisplay(self, 'reltol', 'sea level rise relative convergence criterion, (NaN: not applied)')) - s = "%s\n%s" % (s, fielddisplay(self, 'abstol', 'sea level rise absolute convergence criterion, (default, NaN: not applied)')) - s = "%s\n%s" % (s, fielddisplay(self, 'maxiter', 'maximum number of nonlinear iterations')) - s = "%s\n%s" % (s, fielddisplay(self, 'love_h', 'load Love number for radial displacement')) - s = "%s\n%s" % (s, fielddisplay(self, 'love_k', 'load Love number for gravitational potential perturbation')) - s = "%s\n%s" % (s, fielddisplay(self, 'love_l', 'load Love number for horizontal displaements')) - s = "%s\n%s" % (s, fielddisplay(self, 'tide_love_k', 'tidal load Love number (degree 2)')) - s = "%s\n%s" % (s, fielddisplay(self, 'tide_love_h', 'tidal load Love number (degree 2)')) - s = "%s\n%s" % (s, fielddisplay(self, 'fluid_love', 'secular fluid Love number')) - s = "%s\n%s" % (s, fielddisplay(self, 'equatorial_moi', 'mean equatorial moment of inertia [kg m^2]')) - s = "%s\n%s" % (s, fielddisplay(self, 'polar_moi', 'polar moment of inertia [kg m^2]')) - s = "%s\n%s" % (s, fielddisplay(self, 'angular_velocity', 'mean rotational velocity of earth [per second]')) - s = "%s\n%s" % (s, fielddisplay(self, 'ocean_area_scaling', 'correction for model representation of ocean area [default: No correction]')) - s = "%s\n%s" % (s, fielddisplay(self, 'hydro_rate', 'rate of hydrological expansion [mm / yr]')) - s = "%s\n%s" % (s, fielddisplay(self, 'geodetic', 'compute geodetic SLR? (in addition to steric?) 
default 0')) - s = "%s\n%s" % (s, fielddisplay(self, 'geodetic_run_frequency', 'how many time steps we skip before we run SLR solver during transient (default: 1)')) - s = "%s\n%s" % (s, fielddisplay(self, 'rigid', 'rigid earth graviational potential perturbation')) - s = "%s\n%s" % (s, fielddisplay(self, 'elastic', 'elastic earth graviational potential perturbation')) - s = "%s\n%s" % (s, fielddisplay(self, 'rotation', 'earth rotational potential perturbation')) - s = "%s\n%s" % (s, fielddisplay(self, 'degacc', 'accuracy (default .01 deg) for numerical discretization of the Green''s functions')) - s = "%s\n%s" % (s, fielddisplay(self, 'transitions', 'indices into parts of the mesh that will be icecaps')) - s = "%s\n%s" % (s, fielddisplay(self, 'requested_outputs', 'additional outputs requested')) - + s = ' slr parameters:\n' + s += '{}\n'.format(fielddisplay(self, 'deltathickness', 'thickness change: ice height equivalent [m]')) + s += '{}\n'.format(fielddisplay(self, 'sealevel', 'current sea level (prior to computation) [m]')) + s += '{}\n'.format(fielddisplay(self, 'spcthickness', 'thickness constraints (NaN means no constraint) [m]')) + s += '{}\n'.format(fielddisplay(self, 'reltol', 'sea level rise relative convergence criterion, (NaN: not applied)')) + s += '{}\n'.format(fielddisplay(self, 'abstol', 'sea level rise absolute convergence criterion, (default, NaN: not applied)')) + s += '{}\n'.format(fielddisplay(self, 'maxiter', 'maximum number of nonlinear iterations')) + s += '{}\n'.format(fielddisplay(self, 'love_h', 'load Love number for radial displacement')) + s += '{}\n'.format(fielddisplay(self, 'love_k', 'load Love number for gravitational potential perturbation')) + s += '{}\n'.format(fielddisplay(self, 'love_l', 'load Love number for horizontal displaements')) + s += '{}\n'.format(fielddisplay(self, 'tide_love_k', 'tidal load Love number (degree 2)')) + s += '{}\n'.format(fielddisplay(self, 'tide_love_h', 'tidal load Love number (degree 2)')) + s += '{}\n'.format(fielddisplay(self, 'fluid_love', 'secular fluid Love number')) + s += '{}\n'.format(fielddisplay(self, 'equatorial_moi', 'mean equatorial moment of inertia [kg m^2]')) + s += '{}\n'.format(fielddisplay(self, 'polar_moi', 'polar moment of inertia [kg m^2]')) + s += '{}\n'.format(fielddisplay(self, 'angular_velocity', 'mean rotational velocity of earth [per second]')) + s += '{}\n'.format(fielddisplay(self, 'ocean_area_scaling', 'correction for model representation of ocean area [default: No correction]')) + s += '{}\n'.format(fielddisplay(self, 'hydro_rate', 'rate of hydrological expansion [mm / yr]')) + s += '{}\n'.format(fielddisplay(self, 'geodetic', 'compute geodetic SLR? (in addition to steric?) 
default 0')) + s += '{}\n'.format(fielddisplay(self, 'geodetic_run_frequency', 'how many time steps we skip before we run SLR solver during transient (default: 1)')) + s += '{}\n'.format(fielddisplay(self, 'rigid', 'rigid earth graviational potential perturbation')) + s += '{}\n'.format(fielddisplay(self, 'elastic', 'elastic earth graviational potential perturbation')) + s += '{}\n'.format(fielddisplay(self, 'rotation', 'earth rotational potential perturbation')) + s += '{}\n'.format(fielddisplay(self, 'degacc', 'accuracy (default .01 deg) for numerical discretization of the Green''s functions')) + s += '{}\n'.format(fielddisplay(self, 'transitions', 'indices into parts of the mesh that will be icecaps')) + s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested')) return s # }}} def setdefaultparameters(self): # {{{ - #Convergence criterion: absolute, relative and residual - self.reltol = 0.01 #default - self.abstol = np.nan #1 mm of sea level rise - #maximum of non - linear iterations. + # Convergence criterion: absolute, relative and residual + self.reltol = 0.01 # default + self.abstol = np.nan # 1 mm of sea level rise + # Maximum number of non-linear iterations self.maxiter = 5 - #computational flags: + # Computational flags self.geodetic = 0 self.rigid = 1 self.elastic = 1 self.ocean_area_scaling = 0 self.rotation = 1 - #tidal love numbers: - self.tide_love_h = 0.6149 #degree 2 - self.tide_love_k = 0.3055 #degree 2 - #secular fluid love number: + # Tidal love numbers + self.tide_love_h = 0.6149 # degree 2 + self.tide_love_k = 0.3055 # degree 2 + # Secular fluid love number self.fluid_love = 0.942 - #moment of inertia: + # Moment of inertia self.equatorial_moi = 8.0077e37 # [kg m^2] self.polar_moi = 8.0345e37 # [kg m^2] - #mean rotational velocity of earth - self.angular_velocity = 7.2921e-5 # [s^ - 1] - #numerical discretization accuracy + # Mean rotational velocity of earth + self.angular_velocity = 7.2921e-5 # [s^-1] + # Numerical discretization accuracy self.degacc = 0.01 - #hydro + # Hydro self.hydro_rate = 0 - #how many time steps we skip before we run SLR solver during transient + # How many time steps we skip before we run SLR solver during transient self.geodetic_run_frequency = 1 - #output default: + # Output default self.requested_outputs = ['default'] - #transitions should be a cell array of vectors: + # Transitions should be a list of vectors self.transitions = [] - #horizontal displacement? (not by default) + # Horizontal displacement? 
(not on by default) self.horiz = 0 - #earth area + # Earth area self.planetradius = planetradius('earth') - return self #}}} def checkconsistency(self, md, solution, analyses): # {{{ - #Early return - if (solution != 'SealevelriseAnalysis') or (solution == 'TransientSolution' and md.transient.isslr == 0): + # Early return + if 'SealevelriseAnalysis' not in analyses or (solution == 'TransientSolution' and not md.transient.isslr): return md md = checkfield(md, 'fieldname', 'slr.deltathickness', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofelements]) @@ -145,11 +143,11 @@ md = checkfield(md, 'fieldname', 'slr.requested_outputs', 'stringrow', 1) md = checkfield(md, 'fieldname', 'slr.horiz', 'NaN', 1, 'Inf', 1, 'values', [0, 1]) - #check that love numbers are provided at the same level of accuracy: + # Check that love numbers are provided at the same level of accuracy: if (size(self.love_h, 0) != size(self.love_k, 0)) or (size(self.love_h, 0) != size(self.love_l, 0)): raise Exception('slr error message: love numbers should be provided at the same level of accuracy') - #cross check that whereever we have an ice load, the mask is < 0 on each vertex: + # Cross check that whereever we have an ice load, the mask is < 0 on each vertex: pos = np.where(self.deltathickness) maskpos = md.mask.ice_levelset[md.mesh.elements[pos, :]] els = np.where(maskpos > 0) @@ -156,10 +154,17 @@ if len(els[0]) > 0: warnings.warn('slr checkconsistency fail: there are elements with ice loads where some vertices are not on the ice!') - #check that if geodetic is requested, we are a mesh3dsurface model (planet), or if we are not, - #a coupler to a planet model is provided. - if self.geodetic and not md.transient.iscoupler and domaintype(md.mesh) != 'mesh3dsurface': - raise Exception('model is requesting geodetic computations without being a mesh3dsurface, or being coupled to one!') + # Check that if geodetic is requested, we are a mesh3dsurface model (planet), or if we are not, a coupler to a planet model is provided + if self.geodetic: + if md.transient.iscoupler: + # We are good + pass + else: + if md.mesh.__class__.__name__ == 'mesh3dsurface': + # We are good + pass + else: + raise Exception('model is requesting geodetic computations without being a mesh3dsurface, or being coupled to one!') return md # }}} @@ -195,7 +200,7 @@ WriteData(fid, prefix, 'object', self, 'fieldname', 'geodetic', 'format', 'Integer') WriteData(fid, prefix, 'object', self, 'fieldname', 'planetradius', 'format', 'Double') - #process requested outputs + # Process requested outputs outputs = self.requested_outputs indices = [i for i, x in enumerate(outputs) if x == 'default'] if len(indices) > 0: Index: ../trunk-jpl/src/m/classes/linearbasalforcings.py =================================================================== --- ../trunk-jpl/src/m/classes/linearbasalforcings.py (revision 25687) +++ ../trunk-jpl/src/m/classes/linearbasalforcings.py (revision 25688) @@ -2,20 +2,20 @@ from checkfield import checkfield from fielddisplay import fielddisplay +from project3d import project3d from WriteData import WriteData class linearbasalforcings(object): - """ - LINEAR BASAL FORCINGS class definition + """LINEAR BASAL FORCINGS class definition - Usage: - basalforcings = linearbasalforcings() + Usage: + basalforcings = linearbasalforcings() """ - def __init__(self, *args): # {{{ - - if not len(args): + def __init__(self, *args): #{{{ + nargs = len(args) + if nargs == 0: print('empty init') self.deepwater_melting_rate = 0. self.deepwater_elevation = 0. 
@@ -25,9 +25,9 @@ self.perturbation_melting_rate = float('NaN') self.geothermalflux = float('NaN') - #set defaults + # set defaults self.setdefaultparameters() - elif len(args) == 1 and args[0].__module__ == 'basalforcings': + elif nargs == 1 and args[0].__module__ == 'basalforcings': print('converting basalforings to linearbasalforcings') inv = args[0] self.groundedice_melting_rate = inv.groundedice_melting_rate @@ -38,46 +38,55 @@ self.upperwater_melting_rate = 0. self.upperwater_elevation = 0. - #set defaults + # Set defaults self.setdefaultparameters() else: raise Exception('constructor not supported') #}}} - def __repr__(self): # {{{ - string = " linear basal forcings parameters:" - string = "%s\n%s" % (string, fielddisplay(self, "deepwater_melting_rate", "basal melting rate (positive if melting applied for floating ice whith base < deepwater_elevation) [m/yr]")) - string = "%s\n%s" % (string, fielddisplay(self, "deepwater_elevation", "elevation of ocean deepwater [m]")) - string = "%s\n%s" % (string, fielddisplay(self, "upperwater_melting_rate", "upper melting rate (positive if melting applied for floating ice whith base >= upperwater_elevation) [m/yr]")) - string = "%s\n%s" % (string, fielddisplay(self, "upperwater_elevation", "elevation of ocean upper water [m]")) - string = "%s\n%s" % (string, fielddisplay(self, "groundedice_melting_rate", "basal melting rate (positive if melting) [m/yr]")) - string = "%s\n%s" % (string, fielddisplay(self, "perturbation_melting_rate", "perturbation applied to computed melting rate (positive if melting) [m/yr]")) - string = "%s\n%s" % (string, fielddisplay(self, "geothermalflux", "geothermal heat flux [W/m^2]")) - return string + + def __repr__(self): #{{{ + s = ' linear basal forcings parameters:\n' + s += '{}\n'.format(fielddisplay(self, "deepwater_melting_rate", "basal melting rate (positive if melting applied for floating ice whith base < deepwater_elevation) [m/yr]")) + s += '{}\n'.format(fielddisplay(self, "deepwater_elevation", "elevation of ocean deepwater [m]")) + s += '{}\n'.format(fielddisplay(self, "upperwater_melting_rate", "upper melting rate (positive if melting applied for floating ice whith base >= upperwater_elevation) [m/yr]")) + s += '{}\n'.format(fielddisplay(self, "upperwater_elevation", "elevation of ocean upper water [m]")) + s += '{}\n'.format(fielddisplay(self, "groundedice_melting_rate", "basal melting rate (positive if melting) [m/yr]")) + s += '{}\n'.format(fielddisplay(self, "perturbation_melting_rate", "perturbation applied to computed melting rate (positive if melting) [m/yr]")) + s += '{}\n'.format(fielddisplay(self, "geothermalflux", "geothermal heat flux [W/m^2]")) + return s #}}} - def initialize(self, md): # {{{ + + def extrude(self, md): #{{{ + self.perturbation_melting_rate = project3d(md, 'vector', self.perturbation_melting_rate, 'type', 'node', 'layer', 1) + self.groundedice_melting_rate = project3d(md, 'vector', self.groundedice_melting_rate, 'type', 'node', 'layer', 1) + self.geothermalflux = project3d(md, 'vector', self.geothermalflux, 'type', 'node', 'layer', 1) # Bedrock only gets geothermal flux + return self + #}}} + + def initialize(self, md): #{{{ if np.all(np.isnan(self.groundedice_melting_rate)): self.groundedice_melting_rate = np.zeros((md.mesh.numberofvertices)) print(" no basalforcings.groundedice_melting_rate specified: values set as zero") return self #}}} + def setdefaultparameters(self): # {{{ self.deepwater_melting_rate = 50.0 self.deepwater_elevation = -800.0 self.upperwater_melting_rate 
= 0.0 self.upperwater_elevation = -400.0 + return self #}}} - def checkconsistency(self, md, solution, analyses): # {{{ + def checkconsistency(self, md, solution, analyses): #{{{ if not np.all(np.isnan(self.perturbation_melting_rate)): md = checkfield(md, 'fieldname', 'basalforcings.perturbation_melting_rate', 'NaN', 1, 'Inf', 1, 'timeseries', 1) - - if 'MasstransportAnalysis' in analyses and not (solution == 'TransientSolution' and not md.transient.ismasstransport): + if 'MasstransportAnalysis' in analyses and not solution == 'TransientSolution' and not md.transient.ismasstransport: md = checkfield(md, 'fieldname', 'basalforcings.groundedice_melting_rate', 'NaN', 1, 'Inf', 1, 'timeseries', 1) md = checkfield(md, 'fieldname', 'basalforcings.deepwater_melting_rate', '>=', 0, 'singletimeseries', 1) md = checkfield(md, 'fieldname', 'basalforcings.upperwater_melting_rate', '>=', 0, 'singletimeseries', 1) md = checkfield(md, 'fieldname', 'basalforcings.deepwater_elevation', '<', 'basalforcings.upperwater_elevation', 'singletimeseries', 1) md = checkfield(md, 'fieldname', 'basalforcings.upperwater_elevation', '<=', 0, 'singletimeseries', 1) - if 'BalancethicknessAnalysis' in analyses: md = checkfield(md, 'fieldname', 'basalforcings.groundedice_melting_rate', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices]) md = checkfield(md, 'fieldname', 'basalforcings.deepwater_melting_rate', '>=', 0, 'singletimeseries', 1) @@ -84,8 +93,7 @@ md = checkfield(md, 'fieldname', 'basalforcings.upperwater_melting_rate', '>=', 0, 'singletimeseries', 1) md = checkfield(md, 'fieldname', 'basalforcings.deepwater_elevation', '<', 'basalforcings.upperwater_elevation', 'singletimeseries', 1) md = checkfield(md, 'fieldname', 'basalforcings.upperwater_elevation', '<=', 0, 'singletimeseries', 1) - - if 'ThermalAnalysis' in analyses and not (solution == 'TransientSolution' and not md.transient.isthermal): + if 'ThermalAnalysis' in analyses and not solution == 'TransientSolution' and not md.transient.isthermal: md = checkfield(md, 'fieldname', 'basalforcings.groundedice_melting_rate', 'NaN', 1, 'Inf', 1, 'timeseries', 1) md = checkfield(md, 'fieldname', 'basalforcings.deepwater_melting_rate', '>=', 0, 'singletimeseries', 1) md = checkfield(md, 'fieldname', 'basalforcings.upperwater_melting_rate', '>=', 0, 'singletimeseries', 1) @@ -94,8 +102,9 @@ md = checkfield(md, 'fieldname', 'basalforcings.geothermalflux', 'NaN', 1, 'Inf', 1, 'timeseries', 1, '>=', 0) return md - # }}} - def marshall(self, prefix, md, fid): # {{{ + #}}} + + def marshall(self, prefix, md, fid): #{{{ yts = md.constants.yts WriteData(fid, prefix, 'name', 'md.basalforcings.model', 'data', 2, 'format', 'Integer') @@ -106,4 +115,4 @@ WriteData(fid, prefix, 'object', self, 'fieldname', 'deepwater_elevation', 'format', 'DoubleMat', 'mattype', 3, 'name', 'md.basalforcings.deepwater_elevation', 'yts', yts) WriteData(fid, prefix, 'object', self, 'fieldname', 'upperwater_melting_rate', 'format', 'DoubleMat', 'mattype', 3, 'timeserieslength', 2, 'name', 'md.basalforcings.upperwater_melting_rate', 'scale', 1. 
/ yts, 'yts', yts) WriteData(fid, prefix, 'object', self, 'fieldname', 'upperwater_elevation', 'format', 'DoubleMat', 'mattype', 3, 'name', 'md.basalforcings.upperwater_elevation', 'yts', yts) - # }}} + #}}} Index: ../trunk-jpl/src/m/classes/solidearthsettings.py =================================================================== --- ../trunk-jpl/src/m/classes/solidearthsettings.py (revision 25687) +++ ../trunk-jpl/src/m/classes/solidearthsettings.py (revision 25688) @@ -6,12 +6,11 @@ class solidearthsettings(object): - ''' - SOLIDEARTHSETTINGS class definition + """SOLIDEARTHSETTINGS class definition - Usage: - solidearthsettings = solidearthsettings() - ''' + Usage: + solidearthsettings = solidearthsettings() + """ def __init__(self, *args): #{{{ nargin = len(args) @@ -33,19 +32,18 @@ s += '{}\n'.format(fielddisplay(self, 'rigid', 'rigid earth graviational potential perturbation')) s += '{}\n'.format(fielddisplay(self, 'elastic', 'elastic earth graviational potential perturbation')) s += '{}\n'.format(fielddisplay(self, 'degacc', 'accuracy (default .01 deg) for numerical discretization of the Green\'s functions')) - return s #}}} def setdefaultparameters(self): # {{{ - #Convergence criterion: absolute, relative, and residual + # Convergence criterion: absolute, relative, and residual self.reltol = 0.01 # 1 percent self.abstol = np.nan # default - #maximum of non-linear iterations + # Maximum of non-linear iterations self.maxiter = 5 - #computational flags + # Computational flags self.rigid = 1 self.elastic = 1 self.rotation = 1 @@ -52,20 +50,20 @@ self.ocean_area_scaling = 0 self.computesealevelchange = 0 - #numerical discetization accuracy + # Numerical discretization accuracy self.degacc = .01 - #how many time steps we skip before we run solidearthsettings solver during transient + # How many time steps we skip before we run solidearthsettings solver during transient self.runfrequency = 1 - #horizontal displacemnet? (not by default) + # Horizontal displacement? 
(not on by default) self.horiz = 0 + return self #}}} def checkconsistency(self, md, solution, analyses): # {{{ - if ('SealevelriseAnalysis' not in analyses) or (solution == 'TransientSolution' and md.transient.isslr == 0): + if ('SealevelriseAnalysis' not in analyses) or (solution == 'TransientSolution' and not md.transient.isslr): return md - md = checkfield(md, 'fieldname', 'solidearth.settings.reltol', 'size', [1]) md = checkfield(md, 'fieldname', 'solidearth.settings.abstol', 'size', [1]) md = checkfield(md, 'fieldname', 'solidearth.settings.maxiter', 'size', [1], '>=', 1) @@ -73,18 +71,17 @@ md = checkfield(md, 'fieldname', 'solidearth.settings.degacc', 'size', [1], '>=', 1e-10) md = checkfield(md, 'fieldname', 'solidearth.settings.horiz', 'NaN', 1, 'Inf', 1, 'values', [0, 1]) - #a coupler to planet model is provided + # A coupler to planet model is provided if self.computesealevelchange: if md.transient.iscoupler: - #we are good + # We are good pass else: if md.mesh.__class__.__name__ == 'mesh3dsurface': - #we are good + # We are good pass else: raise Exception('model is requesting sealevel computations without being a mesh3dsurface, or being coupled to one!') - return md #}}} Index: ../trunk-jpl/src/m/classes/sealevelmodel.py =================================================================== --- ../trunk-jpl/src/m/classes/sealevelmodel.py (revision 25687) +++ ../trunk-jpl/src/m/classes/sealevelmodel.py (revision 25688) @@ -1,6 +1,8 @@ from copy import deepcopy import warnings + import numpy as np + from fielddisplay import fielddisplay from generic import generic from getsubattr import getsubattr @@ -46,11 +48,9 @@ self.eltransitions = [] self.planet = '' - # Create a default object self.setdefaultparameters() if len(args): - # Use provided options to set fields options = pairoptions(*args) # Recover all the icecap models @@ -89,27 +89,27 @@ def checkconsistency(slm, solutiontype): # {{{ # Is the coupler turned on? 
for i in range(len(slm.icecaps)): - if slm.icecaps[i].transient.iscoupler == 0: + if not slm.icecaps[i].transient.iscoupler: warnings.warn('sealevelmodel checkconsistency error: icecap model {} should have the transient coupler option turned on!'.format(slm.icecaps[i].miscellaneous.name)) - if slm.earth.transient.iscoupler == 0: + if not slm.earth.transient.iscoupler: warnings.warn('sealevelmodel checkconsistency error: earth model should have the transient coupler option turned on!') # Check that the transition vectors have the right size for i in range(len(slm.icecaps)): if slm.icecaps[i].mesh.numberofvertices != len(slm.earth.slr.transitions[i]): - raise RuntimeError('sealevelmodel checkconsistency issue with size of transition vector for ice cap: {} name: {}'.format(i, slm.icecaps[i].miscellaneous.name)) + raise Exception('sealevelmodel checkconsistency issue with size of transition vector for ice cap: {} name: {}'.format(i, slm.icecaps[i].miscellaneous.name)) # Check that run frequency is the same everywhere for i in range(len(slm.icecaps)): if slm.icecaps[i].slr.geodetic_run_frequency != slm.earth.geodetic_run_frequency: - raise RuntimeError('sealevelmodel checkconsistency error: icecap model {} should have the same run frequency as earth!'.format(slm.icecaps[i].miscellaneous.name)) + raise Exception('sealevelmodel checkconsistency error: icecap model {} should have the same run frequency as earth!'.format(slm.icecaps[i].miscellaneous.name)) # Make sure steric_rate is the same everywhere for i in range(len(slm.icecaps)): md = slm.icecaps[i] if np.nonzero(md.slr.steric_rate - slm.earth.slr.steric_rate[slm.earth.slr.transitions[i]]) != []: - raise RuntimeError('steric rate on ice cap {} is not the same as for the earth'.format(md.miscellaneous.name)) + raise Exception('steric rate on ice cap {} is not the same as for the earth'.format(md.miscellaneous.name)) #}}} def mergeresults(self): # {{{ @@ -167,7 +167,7 @@ def addbasin(self, bas): # {{{ if bas.__class__.__name__ != 'basin': - raise RuntimeError('addbasin method only takes a \'basin\' class object as input') + raise Exception('addbasin method only takes a \'basin\' class object as input') self.basins.append(bas) #}}} @@ -386,7 +386,7 @@ def viscousiterations(self): #{{{ for i in range(len(self.icecaps)): ic = self.icecaps[i] - mvi = ic.resutls.TransientSolution[0].StressbalanceConvergenceNumSteps + mvi = ic.results.TransientSolution[0].StressbalanceConvergenceNumSteps for j in range(1, len(ic.results.TransientSolution) - 1): mvi = np.max(mvi, ic.results.TransientSolution[j].StressbalanceConvergenceNumSteps) print("{}, {}: {}".format(i, self.icecaps[i].miscellaneous.name, mvi)) @@ -464,7 +464,7 @@ ic = self.earth if not noearth: - ic.results.TransientSolution = ic.resutls.TransientSolution[:mintimestep] + ic.results.TransientSolution = ic.results.TransientSolution[:mintimestep] self.earth = ic Index: ../trunk-jpl/src/m/classes/surfaceload.py =================================================================== --- ../trunk-jpl/src/m/classes/surfaceload.py (revision 25687) +++ ../trunk-jpl/src/m/classes/surfaceload.py (revision 25688) @@ -30,27 +30,22 @@ s += '{}\n'.format(fielddisplay(self, 'icethicknesschange', 'thickness change: ice height equivalent [mIce/yr]')) s += '{}\n'.format(fielddisplay(self, 'waterheightchange', 'water height change: water height equivalent [mWater/yr]')) s += '{}\n'.format(fielddisplay(self, 'other', 'other loads (sediments) [kg/m^2/yr]')) - return s #}}} def setdefaultparameters(self): # {{{ - return + 
return self #}}} def checkconsistency(self, md, solution, analyses): # {{{ - if ('SealevelriseAnalysis' not in analyses) or (solution == 'TransientSolution' and md.transient.isslr == 0): + if ('SealevelriseAnalysis' not in analyses) or (solution == 'TransientSolution' and not md.transient.isslr): return md - if len(self.icethicknesschange): md = checkfield(md,'fieldname', 'solidearth.surfaceload.icethicknesschange', 'timeseries', 1, 'NaN', 1, 'Inf', 1) - if len(self.waterheightchange): md = checkfield(md,'fieldname', 'solidearth.surfaceload.waterheightchange', 'timeseries', 1, 'NaN', 1, 'Inf', 1) - if len(self.other): md = checkfield(md,'fieldname', 'solidearth.surfaceload.other', 'timeseries', 1, 'NaN', 1, 'Inf', 1) - return md #}}} @@ -57,13 +52,10 @@ def marshall(self, prefix, md, fid): #{{{ if len(self.icethicknesschange) == 0: self.icethicknesschange = np.zeros((md.mesh.numberofelements + 1, )) - if len(self.waterheightchange) == 0: self.waterheightchange = np.zeros((md.mesh.numberofelements + 1, )) - if len(self.other) == 0: self.other = np.zeros((md.mesh.numberofelements + 1, )) - WriteData(fid, prefix, 'object', self, 'fieldname', 'icethicknesschange', 'name', 'md.solidearth.surfaceload.icethicknesschange', 'format', 'DoubleMat', 'mattype', 2, 'timeserieslength', md.mesh.numberofelements + 1, 'yts', md.constants.yts) WriteData(fid, prefix, 'object', self, 'fieldname', 'waterheightchange', 'name', 'md.solidearth.surfaceload.waterheightchange', 'format', 'DoubleMat', 'mattype', 2, 'timeserieslength', md.mesh.numberofelements + 1, 'yts', md.constants.yts) WriteData(fid, prefix, 'object', self, 'fieldname', 'other', 'name', 'md.solidearth.surfaceload.other', 'format', 'DoubleMat', 'mattype', 2, 'timeserieslength', md.mesh.numberofelements + 1, 'yts', md.constants.yts) Index: ../trunk-jpl/src/m/classes/lovenumbers.py =================================================================== --- ../trunk-jpl/src/m/classes/lovenumbers.py (revision 25687) +++ ../trunk-jpl/src/m/classes/lovenumbers.py (revision 25688) @@ -8,25 +8,24 @@ class lovenumbers(object): #{{{ - ''' - LOVENUMBERS numbers class definition + """LOVENUMBERS class definition - Usage: - lovenumbers = lovenumbers() #will setup love numbers deg 1001 by default - lovenumbers = lovenumbers('maxdeg', 10001); #supply numbers of degrees required (here, 10001) - ''' + Usage: + lovenumbers = lovenumbers() #will setup love numbers deg 1001 by default + lovenumbers = lovenumbers('maxdeg', 10001); #supply numbers of degrees required (here, 10001) + """ def __init__(self, *args): #{{{ - #regular love numbers: - self.h = [] #provided by PREM model - self.k = [] #idem - self.l = [] #idem + # Regular love numbers + self.h = [] # Provided by PREM model + self.k = [] # idem + self.l = [] # idem - #tidal love numbers for computing rotational feedback: + # Tidal love numbers for computing rotational feedback self.th = [] self.tk = [] self.tl = [] - self.tk2secular = 0 #deg 2 secular number. 
+ self.tk2secular = 0 # deg 2 secular number options = pairoptions(*args) maxdeg = options.getfieldvalue('maxdeg', 1000) @@ -35,7 +34,7 @@ #}}} def setdefaultparameters(self, maxdeg, referenceframe): #{{{ - #initialize love numbers: + # Initialize love numbers self.h = getlovenumbers('type', 'loadingverticaldisplacement', 'referenceframe', referenceframe, 'maxdeg', maxdeg) self.k = getlovenumbers('type', 'loadinggravitationalpotential', 'referenceframe', referenceframe, 'maxdeg', maxdeg) self.l = getlovenumbers('type', 'loadinghorizontaldisplacement', 'referenceframe', referenceframe, 'maxdeg', maxdeg) @@ -43,14 +42,14 @@ self.tk = getlovenumbers('type', 'tidalgravitationalpotential', 'referenceframe', referenceframe, 'maxdeg', maxdeg) self.tl = getlovenumbers('type', 'tidalhorizontaldisplacement', 'referenceframe', referenceframe, 'maxdeg', maxdeg) - #secular fluid love number: + # Secular fluid love number self.tk2secular = 0.942 + return self #}}} def checkconsistency(self, md, solution, analyses): #{{{ - if ('SealevelriseAnalysis' not in analyses) or (solution == 'TransientSolution' and md.transient.isslr == 0): + if ('SealevelriseAnalysis' not in analyses) or (solution == 'TransientSolution' and not md.transient.isslr): return - md = checkfield(md, 'fieldname', 'solidearth.lovenumbers.h', 'NaN', 1, 'Inf', 1) md = checkfield(md, 'fieldname', 'solidearth.lovenumbers.k', 'NaN', 1, 'Inf', 1) md = checkfield(md, 'fieldname', 'solidearth.lovenumbers.l', 'NaN', 1, 'Inf', 1) @@ -58,10 +57,10 @@ md = checkfield(md, 'fieldname', 'solidearth.lovenumbers.th', 'NaN', 1, 'Inf', 1) md = checkfield(md, 'fieldname', 'solidearth.lovenumbers.tk', 'NaN', 1, 'Inf', 1) md = checkfield(md, 'fieldname', 'solidearth.lovenumbers.tk2secular', 'NaN', 1, 'Inf', 1) - - #check that love numbers are provided at the same level of accuracy: + # Check that love numbers are provided at the same level of accuracy if (self.h.shape[0] != self.k.shape[0]) or (self.h.shape[0] != self.l.shape[0]): raise ValueError('lovenumbers error message: love numbers should be provided at the same level of accuracy') + return md #}}} def defaultoutputs(self, md): #{{{ @@ -69,8 +68,7 @@ #}}} def __repr__(self): #{{{ - s = ' lovenumbers parameters:' - + s = ' lovenumbers parameters:\n' s += '{}\n'.format(fielddisplay(self, 'h', 'load Love number for radial displacement')) s += '{}\n'.format(fielddisplay(self, 'k', 'load Love number for gravitational potential perturbation')) s += '{}\n'.format(fielddisplay(self, 'l', 'load Love number for horizontal displacements')) @@ -77,7 +75,6 @@ s += '{}\n'.format(fielddisplay(self, 'th', 'tidal load Love number (deg 2)')) s += '{}\n'.format(fielddisplay(self, 'tk', 'tidal load Love number (deg 2)')) s += '{}\n'.format(fielddisplay(self, 'tk2secular', 'secular fluid Love number')) - return s #}}} Index: ../trunk-jpl/src/m/classes/frictionpism.py =================================================================== --- ../trunk-jpl/src/m/classes/frictionpism.py (revision 25687) +++ ../trunk-jpl/src/m/classes/frictionpism.py (revision 25688) @@ -1,17 +1,18 @@ import numpy as np + +from checkfield import checkfield from fielddisplay import fielddisplay from project3d import project3d -from checkfield import checkfield from WriteData import WriteData class frictionpism(object): - """ - frictionpism class definition + """FRICTIONPISM class definition Usage: - frictionpism = frictionpism() + frictionpism = frictionpism() """ + def __init__(self): self.pseudoplasticity_exponent = 0. self.threshold_speed = 0. 
@@ -28,7 +29,7 @@ self.till_friction_angle = project3d(md, 'vector', self.till_friction_angle, 'type', 'node', 'layer', 1) self.sediment_compressibility_coefficient = project3d(md, 'vector', self.sediment_compressibility_coefficient, 'type', 'node', 'layer', 1) return self - # }}} + # }}} def setdefaultparameters(self): # {{{ self.pseudoplasticity_exponent = 0.6 @@ -39,13 +40,11 @@ # }}} def checkconsistency(self, md, solution, analyses): # {{{ - - #Early return + # Early return if 'StressbalanceAnalysis' not in analyses and 'ThermalAnalysis' not in analyses: return md - if solution == 'TransientSolution' and md.transient.isstressbalance == 0 and md.transient.isthermal == 0: + if solution == 'TransientSolution' and not md.transient.isstressbalance and not md.transient.isthermal: return md - md = checkfield(md, 'fieldname', 'friction.pseudoplasticity_exponent', 'numel', [1], '>', 0, 'NaN', 1, 'Inf', 1) md = checkfield(md, 'fieldname', 'friction.threshold_speed', 'numel', [1], '>', 0, 'NaN', 1, 'Inf', 1) md = checkfield(md, 'fieldname', 'friction.delta', 'numel', [1], '>', 0, '<', 1, 'NaN', 1, 'Inf', 1) @@ -52,21 +51,21 @@ md = checkfield(md, 'fieldname', 'friction.void_ratio', 'numel', [1], '>', 0, '<', 1, 'NaN', 1, 'Inf', 1) md = checkfield(md, 'fieldname', 'friction.till_friction_angle', 'NaN', 1, 'Inf', 1, '<', 360., '>', 0., 'size', [md.mesh.numberofvertices]) #User should give angle in degrees, Matlab calculates in rad md = checkfield(md, 'fieldname', 'friction.sediment_compressibility_coefficient', 'NaN', 1, 'Inf', 1, '<', 1., '>', 0., 'size', [md.mesh.numberofvertices]) + return md # }}} def __repr__(self): # {{{ - string = 'Basal shear stress parameters for the PISM friction law (See Aschwanden et al. 2016 for more details)' - string = "%s\n%s" % (string, fielddisplay(self, 'pseudoplasticity_exponent', 'pseudoplasticity exponent [dimensionless]')) - string = "%s\n%s" % (string, fielddisplay(self, 'threshold_speed', 'threshold speed [m / yr]')) - string = "%s\n%s" % (string, fielddisplay(self, 'delta', 'lower limit of the effective pressure, expressed as a fraction of overburden pressure [dimensionless]')) - string = "%s\n%s" % (string, fielddisplay(self, 'void_ratio', 'void ratio at a reference effective pressure [dimensionless]')) - string = "%s\n%s" % (string, fielddisplay(self, 'till_friction_angle', 'till friction angle [deg], recommended default: 30 deg')) - string = "%s\n%s" % (string, fielddisplay(self, 'sediment_compressibility_coefficient', 'coefficient of compressibility of the sediment [dimensionless], recommended default: 0.12')) - # }}} + s = 'Basal shear stress parameters for the PISM friction law (See Aschwanden et al. 
2016 for more details)\n' + s += "{}\n".format(fielddisplay(self, 'pseudoplasticity_exponent', 'pseudoplasticity exponent [dimensionless]')) + s += "{}\n".format(fielddisplay(self, 'threshold_speed', 'threshold speed [m / yr]')) + s += "{}\n".format(fielddisplay(self, 'delta', 'lower limit of the effective pressure, expressed as a fraction of overburden pressure [dimensionless]')) + s += "{}\n".format(fielddisplay(self, 'void_ratio', 'void ratio at a reference effective pressure [dimensionless]')) + s += "{}\n".format(fielddisplay(self, 'till_friction_angle', 'till friction angle [deg], recommended default: 30 deg')) + s += "{}\n".format(fielddisplay(self, 'sediment_compressibility_coefficient', 'coefficient of compressibility of the sediment [dimensionless], recommended default: 0.12')) + # }}} def marshall(self, prefix, md, fid): # {{{ yts = md.constants.yts - WriteData(fid, prefix, 'name', 'md.friction.law', 'data', 10, 'format', 'Integer') WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'pseudoplasticity_exponent', 'format', 'Double') WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'threshold_speed', 'format', 'Double', 'scale', 1. / yts) @@ -74,4 +73,4 @@ WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'void_ratio', 'format', 'Double') WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'till_friction_angle', 'format', 'DoubleMat', 'mattype', 1) WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'sediment_compressibility_coefficient', 'format', 'DoubleMat', 'mattype', 1) - # }}} + # }}} Index: ../trunk-jpl/src/m/classes/m1qn3inversion.py =================================================================== --- ../trunk-jpl/src/m/classes/m1qn3inversion.py (revision 25687) +++ ../trunk-jpl/src/m/classes/m1qn3inversion.py (revision 25688) @@ -1,44 +1,42 @@ import numpy as np + +from checkfield import checkfield +from fielddisplay import fielddisplay +from marshallcostfunctions import marshallcostfunctions from project3d import project3d -from fielddisplay import fielddisplay -from checkfield import checkfield -from WriteData import WriteData from supportedcontrols import supportedcontrols from supportedcostfunctions import supportedcostfunctions -from marshallcostfunctions import marshallcostfunctions +from WriteData import WriteData class m1qn3inversion(object): - ''' - M1QN3 class definition + """M1QN3 class definition - Usage: - m1qn3inversion = m1qn3inversion() - ''' + Usage: + m1qn3inversion = m1qn3inversion() + """ def __init__(self, *args): # {{{ - if not len(args): print('empty init') self.iscontrol = 0 self.incomplete_adjoint = 0 - self.control_parameters = float('NaN') - self.control_scaling_factors = float('NaN') + self.control_parameters = np.nan + self.control_scaling_factors = np.nan self.maxsteps = 0 self.maxiter = 0 self.dxmin = 0. self.gttol = 0. 
- self.cost_functions = float('NaN') - self.cost_functions_coefficients = float('NaN') - self.min_parameters = float('NaN') - self.max_parameters = float('NaN') - self.vx_obs = float('NaN') - self.vy_obs = float('NaN') - self.vz_obs = float('NaN') - self.vel_obs = float('NaN') - self.thickness_obs = float('NaN') + self.cost_functions = np.nan + self.cost_functions_coefficients = np.nan + self.min_parameters = np.nan + self.max_parameters = np.nan + self.vx_obs = np.nan + self.vy_obs = np.nan + self.vz_obs = np.nan + self.vel_obs = np.nan + self.thickness_obs = np.nan - #set defaults self.setdefaultparameters() elif len(args) == 1 and args[0].__module__ == 'inversion': print('converting inversion to m1qn3inversion') @@ -65,50 +63,36 @@ #}}} def __repr__(self): # {{{ - string = ' m1qn3inversion parameters:' - string = "%s\n%s" % (string, fielddisplay(self, 'iscontrol', 'is inversion activated?')) - string = "%s\n%s" % (string, fielddisplay(self, 'incomplete_adjoint', '1: linear viscosity, 0: non - linear viscosity')) - string = "%s\n%s" % (string, fielddisplay(self, 'control_parameters', 'ex: [''FrictionCoefficient''], or [''MaterialsRheologyBbar'']')) - string = "%s\n%s" % (string, fielddisplay(self, 'control_scaling_factors', 'order of magnitude of each control (useful for multi - parameter optimization)')) - string = "%s\n%s" % (string, fielddisplay(self, 'maxsteps', 'maximum number of iterations (gradient computation)')) - string = "%s\n%s" % (string, fielddisplay(self, 'maxiter', 'maximum number of Function evaluation (forward run)')) - string = "%s\n%s" % (string, fielddisplay(self, 'dxmin', 'convergence criterion: two points less than dxmin from eachother (sup - norm) are considered identical')) - string = "%s\n%s" % (string, fielddisplay(self, 'gttol', '||g(X)||/||g(X0)|| (g(X0): gradient at initial guess X0)')) - string = "%s\n%s" % (string, fielddisplay(self, 'cost_functions', 'indicate the type of response for each optimization step')) - string = "%s\n%s" % (string, fielddisplay(self, 'cost_functions_coefficients', 'cost_functions_coefficients applied to the misfit of each vertex and for each control_parameter')) - string = "%s\n%s" % (string, fielddisplay(self, 'min_parameters', 'absolute minimum acceptable value of the inversed parameter on each vertex')) - string = "%s\n%s" % (string, fielddisplay(self, 'max_parameters', 'absolute maximum acceptable value of the inversed parameter on each vertex')) - string = "%s\n%s" % (string, fielddisplay(self, 'vx_obs', 'observed velocity x component [m / yr]')) - string = "%s\n%s" % (string, fielddisplay(self, 'vy_obs', 'observed velocity y component [m / yr]')) - string = "%s\n%s" % (string, fielddisplay(self, 'vel_obs', 'observed velocity magnitude [m / yr]')) - string = "%s\n%s" % (string, fielddisplay(self, 'thickness_obs', 'observed thickness [m]')) - string = "%s\n%s" % (string, 'Available cost functions:') - string = "%s\n%s" % (string, ' 101: SurfaceAbsVelMisfit') - string = "%s\n%s" % (string, ' 102: SurfaceRelVelMisfit') - string = "%s\n%s" % (string, ' 103: SurfaceLogVelMisfit') - string = "%s\n%s" % (string, ' 104: SurfaceLogVxVyMisfit') - string = "%s\n%s" % (string, ' 105: SurfaceAverageVelMisfit') - string = "%s\n%s" % (string, ' 201: ThicknessAbsMisfit') - string = "%s\n%s" % (string, ' 501: DragCoefficientAbsGradient') - string = "%s\n%s" % (string, ' 502: RheologyBbarAbsGradient') - string = "%s\n%s" % (string, ' 503: ThicknessAbsGradient') - return string + s = ' m1qn3inversion parameters:\n' + s += 
'{}\n'.format(fielddisplay(self, 'iscontrol', 'is inversion activated?')) + s += '{}\n'.format(fielddisplay(self, 'incomplete_adjoint', '1: linear viscosity, 0: non - linear viscosity')) + s += '{}\n'.format(fielddisplay(self, 'control_parameters', 'ex: [''FrictionCoefficient''], or [''MaterialsRheologyBbar'']')) + s += '{}\n'.format(fielddisplay(self, 'control_scaling_factors', 'order of magnitude of each control (useful for multi - parameter optimization)')) + s += '{}\n'.format(fielddisplay(self, 'maxsteps', 'maximum number of iterations (gradient computation)')) + s += '{}\n'.format(fielddisplay(self, 'maxiter', 'maximum number of Function evaluation (forward run)')) + s += '{}\n'.format(fielddisplay(self, 'dxmin', 'convergence criterion: two points less than dxmin from eachother (sup - norm) are considered identical')) + s += '{}\n'.format(fielddisplay(self, 'gttol', '||g(X)||/||g(X0)|| (g(X0): gradient at initial guess X0)')) + s += '{}\n'.format(fielddisplay(self, 'cost_functions', 'indicate the type of response for each optimization step')) + s += '{}\n'.format(fielddisplay(self, 'cost_functions_coefficients', 'cost_functions_coefficients applied to the misfit of each vertex and for each control_parameter')) + s += '{}\n'.format(fielddisplay(self, 'min_parameters', 'absolute minimum acceptable value of the inversed parameter on each vertex')) + s += '{}\n'.format(fielddisplay(self, 'max_parameters', 'absolute maximum acceptable value of the inversed parameter on each vertex')) + s += '{}\n'.format(fielddisplay(self, 'vx_obs', 'observed velocity x component [m / yr]')) + s += '{}\n'.format(fielddisplay(self, 'vy_obs', 'observed velocity y component [m / yr]')) + s += '{}\n'.format(fielddisplay(self, 'vel_obs', 'observed velocity magnitude [m / yr]')) + s += '{}\n'.format(fielddisplay(self, 'thickness_obs', 'observed thickness [m]')) + s += '{}\n'.format('Available cost functions:') + s += '{}\n'.format(' 101: SurfaceAbsVelMisfit') + s += '{}\n'.format(' 102: SurfaceRelVelMisfit') + s += '{}\n'.format(' 103: SurfaceLogVelMisfit') + s += '{}\n'.format(' 104: SurfaceLogVxVyMisfit') + s += '{}\n'.format(' 105: SurfaceAverageVelMisfit') + s += '{}\n'.format(' 201: ThicknessAbsMisfit') + s += '{}\n'.format(' 501: DragCoefficientAbsGradient') + s += '{}\n'.format(' 502: RheologyBbarAbsGradient') + s += '{}\n'.format(' 503: ThicknessAbsGradient') + return s #}}} - def extrude(self, md): # {{{ - self.vx_obs = project3d(md, 'vector', self.vx_obs, 'type', 'node') - self.vy_obs = project3d(md, 'vector', self.vy_obs, 'type', 'node') - self.vel_obs = project3d(md, 'vector', self.vel_obs, 'type', 'node') - self.thickness_obs = project3d(md, 'vector', self.thickness_obs, 'type', 'node') - if not np.any(np.isnan(self.cost_functions_coefficients)): - self.cost_functions_coefficients = project3d(md, 'vector', self.cost_functions_coefficients, 'type', 'node') - if not np.any(np.isnan(self.min_parameters)): - self.min_parameters = project3d(md, 'vector', self.min_parameters, 'type', 'node') - if not np.any(np.isnan(self.max_parameters)): - self.max_parameters = project3d(md, 'vector', self.max_parameters, 'type', 'node') - return self - #}}} - def setdefaultparameters(self): # {{{ #default is incomplete adjoint for now self.incomplete_adjoint = 1 @@ -129,8 +113,22 @@ return self #}}} + def extrude(self, md): # {{{ + self.vx_obs = project3d(md, 'vector', self.vx_obs, 'type', 'node') + self.vy_obs = project3d(md, 'vector', self.vy_obs, 'type', 'node') + self.vel_obs = project3d(md, 'vector', self.vel_obs, 
'type', 'node') + self.thickness_obs = project3d(md, 'vector', self.thickness_obs, 'type', 'node') + if not np.any(np.isnan(self.cost_functions_coefficients)): + self.cost_functions_coefficients = project3d(md, 'vector', self.cost_functions_coefficients, 'type', 'node') + if not np.any(np.isnan(self.min_parameters)): + self.min_parameters = project3d(md, 'vector', self.min_parameters, 'type', 'node') + if not np.any(np.isnan(self.max_parameters)): + self.max_parameters = project3d(md, 'vector', self.max_parameters, 'type', 'node') + return self + #}}} + def checkconsistency(self, md, solution, analyses): # {{{ - #Early return + # Early return if not self.iscontrol: return md @@ -152,10 +150,11 @@ if solution == 'BalancethicknessSolution': md = checkfield(md, 'fieldname', 'inversion.thickness_obs', 'size', [md.mesh.numberofvertices], 'NaN', 1, 'Inf', 1) + elif solution == 'BalancethicknessSoftSolution': + md = checkfield(md, 'fieldname', 'inversion.thickness_obs', 'size', [md.mesh.numberofvertices], 'NaN', 1, 'Inf', 1) else: md = checkfield(md, 'fieldname', 'inversion.vx_obs', 'size', [md.mesh.numberofvertices], 'NaN', 1, 'Inf', 1) md = checkfield(md, 'fieldname', 'inversion.vy_obs', 'size', [md.mesh.numberofvertices], 'NaN', 1, 'Inf', 1) - return md # }}} @@ -179,12 +178,12 @@ WriteData(fid, prefix, 'object', self, 'class', 'inversion', 'fieldname', 'vz_obs', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1. / yts) WriteData(fid, prefix, 'object', self, 'class', 'inversion', 'fieldname', 'thickness_obs', 'format', 'DoubleMat', 'mattype', 1) - #process control parameters + # Process control parameters num_control_parameters = len(self.control_parameters) WriteData(fid, prefix, 'object', self, 'fieldname', 'control_parameters', 'format', 'StringArray') WriteData(fid, prefix, 'data', num_control_parameters, 'name', 'md.inversion.num_control_parameters', 'format', 'Integer') - #process cost functions + # Process cost functions num_cost_functions = np.size(self.cost_functions) data = marshallcostfunctions(self.cost_functions) WriteData(fid, prefix, 'data', data, 'name', 'md.inversion.cost_functions', 'format', 'StringArray') Index: ../trunk-jpl/src/m/classes/inversion.m =================================================================== --- ../trunk-jpl/src/m/classes/inversion.m (revision 25687) +++ ../trunk-jpl/src/m/classes/inversion.m (revision 25688) @@ -107,7 +107,6 @@ md = checkmessage(md,['inversion can only be performed for SSA, HO or FS ice flow models']); end end - if strcmp(solution,'BalancethicknessSolution') md = checkfield(md,'fieldname','inversion.thickness_obs','size',[md.mesh.numberofvertices 1],'NaN',1,'Inf',1); elseif strcmp(solution,'BalancethicknessSoftSolution') Index: ../trunk-jpl/src/m/classes/qmustatistics.m =================================================================== --- ../trunk-jpl/src/m/classes/qmustatistics.m (revision 25687) +++ ../trunk-jpl/src/m/classes/qmustatistics.m (revision 25688) @@ -116,7 +116,7 @@ WriteData(fid,prefix,'name','md.qmu.statistics.numstatistics','data',length(self.method),'format','Integer'); for i=1:length(self.method), m=self.method(i); - WriteData(fid,prefix,'name',sprintf('md.qmu.statistics.method(%i).name',i),'data',m.name,'Format','String'); + WriteData(fid,prefix,'name',sprintf('md.qmu.statistics.method(%i).name',i),'data',m.name,'format','String'); WriteData(fid,prefix,'data',m.fields,'name',sprintf('md.qmu.statistics.method(%i).fields',i),'format','StringArray'); 
WriteData(fid,prefix,'data',m.steps,'name',sprintf('md.qmu.statistics.method(%i).steps',i),'format','IntMat','mattype',3); Index: ../trunk-jpl/src/m/classes/sealevelmodel.m =================================================================== --- ../trunk-jpl/src/m/classes/sealevelmodel.m (revision 25687) +++ ../trunk-jpl/src/m/classes/sealevelmodel.m (revision 25688) @@ -26,14 +26,12 @@ end methods function slm = sealevelmodel(varargin) % {{{ + slm=setdefaultparameters(slm); - if nargin==0, - slm=setdefaultparameters(slm); - else - slm=setdefaultparameters(slm); + if nargin==1, - options=pairoptions(varargin{:}); - + options=pairoptions(varargin{:}); + %recover all the icecap models: slm.icecaps=getfieldvalues(options,'ice_cap',{}); @@ -418,19 +416,19 @@ self.earth=md; end % }}} -function viscousiterations(self) % {{{ - for i=1:length(self.icecaps), - ic=self.icecaps{i}; - mvi=ic.results.TransientSolution(1).StressbalanceConvergenceNumSteps; - for j=2:length(ic.results.TransientSolution)-1, - mvi=max(mvi,ic.results.TransientSolution(j).StressbalanceConvergenceNumSteps); - end - disp(sprintf('%i, %s: %i',i,self.icecaps{i}.miscellaneous.name,mvi)); - end -end % }}} + function viscousiterations(self) % {{{ + for i=1:length(self.icecaps), + ic=self.icecaps{i}; + mvi=ic.results.TransientSolution(1).StressbalanceConvergenceNumSteps; + for j=2:length(ic.results.TransientSolution)-1, + mvi=max(mvi,ic.results.TransientSolution(j).StressbalanceConvergenceNumSteps); + end + disp(sprintf('%i, %s: %i',i,self.icecaps{i}.miscellaneous.name,mvi)); + end + end % }}} function maxtimestep(self) % {{{ for i=1:length(self.icecaps), - ic=self.icecaps{i}; + ic=self.icecaps{i}; mvi=length(ic.results.TransientSolution); timei=ic.results.TransientSolution(end).time; disp(sprintf('%i, %s: %i/%g',i,self.icecaps{i}.miscellaneous.name,mvi,timei)); Index: ../trunk-jpl/src/m/classes/giamme.py =================================================================== --- ../trunk-jpl/src/m/classes/giamme.py (revision 25687) +++ ../trunk-jpl/src/m/classes/giamme.py (revision 25688) @@ -7,12 +7,11 @@ class giamme(object): - ''' - GIAMME class definition + """GIAMME class definition - Usage: - gia = giamme() #gia class based on a multi-model ensemble (ex: Caron et al 2017 statistics) - ''' + Usage: + gia = giamme() #gia class based on a multi-model ensemble (ex: Caron et al 2017 statistics) + """ def __init__(self, *args): #{{{ self.modelid = 0 #index into the multi-model ensemble @@ -32,22 +31,21 @@ s += '{}\n'.format(fielddisplay(self, 'modelid', 'index into the multi-model ensemble, determines which field will be used.')) s += '{}\n'.format(fielddisplay(self, 'Ngia', 'geoid (mm/yr).')) s += '{}\n'.format(fielddisplay(self, 'Ugia', 'uplift (mm/yr).')) - return s #}}} def setdefaultparameters(self): # {{{ - return + return self #}}} def checkconsistency(self, md, solution, analyses): # {{{ - if ('SealevelriseAnalysis' not in analyses) or (solution == 'TransientSolution' and md.transient.isslr == 0): + if 'SealevelriseAnalysis' not in analyses: return md - + if solution == 'TransientSolution' and not md.transient.isslr: + return md md = checkfield(md, 'field', self.Ngia, 'NaN', 1, 'Inf', 1) md = checkfield(md, 'field', self.Ugia, 'NaN', 1, 'Inf', 1) md = checkfield(md, 'field', self.modelid, 'NaN', 1, 'Inf', 1, '>=', 0, '<=', len(self.Ngia)) - return md #}}} @@ -70,6 +68,5 @@ for i in range(len(self.Ngia)): self.Ngia[i] = project3d(md, 'vector', self.Ngia[i], 'type', 'node', 'layer', 1) self.Ugia[i] = project3d(md, 'vector', 
self.Ugia[i], 'type', 'node', 'layer', 1) - return self #}}} Index: ../trunk-jpl/src/m/classes/qmu.py =================================================================== --- ../trunk-jpl/src/m/classes/qmu.py (revision 25687) +++ ../trunk-jpl/src/m/classes/qmu.py (revision 25688) @@ -11,11 +11,10 @@ class qmu(object): - """ - QMU class definition + """QMU class definition - Usage: - qmu = qmu() + Usage: + qmu = qmu() """ def __init__(self): # {{{ @@ -43,17 +42,14 @@ self.adjacency = float('NaN') self.vertex_weight = float('NaN') - #set defaults self.setdefaultparameters() - #}}} def __repr__(self): # {{{ s = ' qmu parameters:\n' - - s += "%s\n" % fielddisplay(self, 'isdakota', 'is qmu analysis activated?') - s += "%s\n" % fielddisplay(self, 'output', 'are we outputting ISSM results, default is 0') + s += '{}\n'.format(fielddisplay(self, 'isdakota', 'is QMU analysis activated?')) + s += '{}\n'.format(fielddisplay(self, 'output', 'are we outputting ISSM results, default is 0')) maxlen = 0 - s += " variables: (arrays of each variable class)\n" + s += ' variables: (arrays of each variable class)\n' # OrderedStruct's iterator returns individual name / array - of - functions pairs for variable in self.variables: @@ -73,7 +69,7 @@ b = 1 if len(size) < 2 else size[1] s += " %-*s: [%ix%i] '%s'\n" % (maxlen + 1, fname, a, b, type(response[1][0])) - s += "%s\n" % fielddisplay(self, 'numberofresponses', 'number of responses') + s += '{}\n'.format(fielddisplay(self, 'numberofresponses', 'number of responses')) if type(self.method) != OrderedDict: self.method = [self.method] @@ -90,7 +86,7 @@ for param in params: print(type(param)) print(param) - s += " params: (array of method - independent parameters)\n" + s += " params: (array of method-independent parameters)\n" fnames = vars(param) maxlen = 0 for fname in fnames: @@ -115,18 +111,17 @@ size = np.shape(getattr(result, fname)) s += " %-*s: [%ix%i] '%s'\n" % (maxlen + 1, fname, a, b, type(getattr(result, fname))) - s += "%s\n" % fielddisplay(self, 'variablepartitions', '') - s += "%s\n" % fielddisplay(self, 'variablepartitions_npart', '') - s += "%s\n" % fielddisplay(self, 'variablepartitions_nt', '') - s += "%s\n" % fielddisplay(self, 'variabledescriptors', '') - s += "%s\n" % fielddisplay(self, 'responsedescriptors', '') - s += "%s\n" % fielddisplay(self, 'method', 'array of dakota_method class') - s += "%s\n" % fielddisplay(self, 'mass_flux_profile_directory', 'directory for mass flux profiles') - s += "%s\n" % fielddisplay(self, 'mass_flux_profiles', 'list of mass_flux profiles') - s += "%s\n" % fielddisplay(self, 'mass_flux_segments', '') - s += "%s\n" % fielddisplay(self, 'adjacency', '') - s += "%s\n" % fielddisplay(self, 'vertex_weight', 'weight applied to each mesh vertex') - + s += '{}\n'.format(fielddisplay(self, 'variablepartitions', '')) + s += '{}\n'.format(fielddisplay(self, 'variablepartitions_npart', '')) + s += '{}\n'.format(fielddisplay(self, 'variablepartitions_nt', '')) + s += '{}\n'.format(fielddisplay(self, 'variabledescriptors', '')) + s += '{}\n'.format(fielddisplay(self, 'responsedescriptors', '')) + s += '{}\n'.format(fielddisplay(self, 'method', 'array of dakota_method class')) + s += '{}\n'.format(fielddisplay(self, 'mass_flux_profile_directory', 'directory for mass flux profiles')) + s += '{}\n'.format(fielddisplay(self, 'mass_flux_profiles', 'list of mass_flux profiles')) + s += '{}\n'.format(fielddisplay(self, 'mass_flux_segments', '')) + s += '{}\n'.format(fielddisplay(self, 'adjacency', '')) + s += 
'{}\n'.format(fielddisplay(self, 'vertex_weight', 'weight applied to each mesh vertex')) return s # }}} Index: ../trunk-jpl/src/m/classes/slr.m =================================================================== --- ../trunk-jpl/src/m/classes/slr.m (revision 25687) +++ ../trunk-jpl/src/m/classes/slr.m (revision 25688) @@ -135,7 +135,7 @@ % warning('slr checkconsistency fail: there are elements with ice loads where some vertices are not on the ice!'); %end - %check that if geodetic is requested, we are a mesh3dsurface model (planet), or if we are not, + %check that if geodetic is requested, we are a mesh3dsurface model (planet), or if we are not, %a coupler to a planet model is provided. if self.geodetic, if md.transient.iscoupler, Index: ../trunk-jpl/src/m/classes/friction.py =================================================================== --- ../trunk-jpl/src/m/classes/friction.py (revision 25687) +++ ../trunk-jpl/src/m/classes/friction.py (revision 25688) @@ -20,26 +20,23 @@ self.coupling = 0 self.effective_pressure = np.nan self.effective_pressure_limit = 0 - - #set defaults self.setdefaultparameters() #}}} def __repr__(self): # {{{ - s = "Basal shear stress parameters: Sigma_b = coefficient^2 * Neff ^r * |u_b|^(s - 1) * u_b, \n(effective stress Neff = rho_ice * g * thickness + rho_water * g * base, r = q / p and s = 1 / p)\n" - s = "{}\n".format(fielddisplay(self, "coefficient", "friction coefficient [SI]")) - s = "{}\n".format(fielddisplay(self, "p", "p exponent")) - s = "{}\n".format(fielddisplay(self, "q", "q exponent")) - s = "{}\n".format(fielddisplay(self, 'coupling', 'Coupling flag 0: uniform sheet (negative pressure ok, default), 1: ice pressure only, 2: water pressure assuming uniform sheet (no negative pressure), 3: use provided effective_pressure, 4: used coupled model (not implemented yet)')) - s = "{}\n".format(fielddisplay(self, 'effective_pressure', 'Effective Pressure for the forcing if not coupled [Pa]')) - s = "{}\n".format(fielddisplay(self, 'effective_pressure_limit', 'Neff do not allow to fall below a certain limit: effective_pressure_limit * rho_ice * g * thickness (default 0)')) - + s = 'Basal shear stress parameters: Sigma_b = coefficient^2 * Neff ^r * |u_b|^(s - 1) * u_b,\n' + s += '(effective stress Neff = rho_ice * g * thickness + rho_water * g * base, r = q / p and s = 1 / p)\n' + s += '{}\n'.format(fielddisplay(self, "coefficient", "friction coefficient [SI]")) + s += '{}\n'.format(fielddisplay(self, "p", "p exponent")) + s += '{}\n'.format(fielddisplay(self, "q", "q exponent")) + s += '{}\n'.format(fielddisplay(self, 'coupling', 'Coupling flag 0: uniform sheet (negative pressure ok, default), 1: ice pressure only, 2: water pressure assuming uniform sheet (no negative pressure), 3: use provided effective_pressure, 4: used coupled model (not implemented yet)')) + s += '{}\n'.format(fielddisplay(self, 'effective_pressure', 'Effective Pressure for the forcing if not coupled [Pa]')) + s += '{}\n'.format(fielddisplay(self, 'effective_pressure_limit', 'Neff do not allow to fall below a certain limit: effective_pressure_limit * rho_ice * g * thickness (default 0)')) return s #}}} def setdefaultparameters(self): # {{{ self.effective_pressure_limit = 0 - return self #}}} @@ -47,7 +44,7 @@ self.coefficient = project3d(md, 'vector', self.coefficient, 'type', 'node', 'layer', 1) self.p = project3d(md, 'vector', self.p, 'type', 'element') self.q = project3d(md, 'vector', self.q, 'type', 'element') - #if self.coupling == 0: #doesnt work with empty loop, so just skip 
it? + # If self.coupling == 0: #doesnt work with empty loop, so just skip it? if self.coupling in[3, 4]: self.effective_pressure = project3d(md, 'vector', self.effective_pressure, 'type', 'node', 'layer', 1) elif self.coupling > 4: @@ -61,12 +58,11 @@ #}}} def checkconsistency(self, md, solution, analyses): # {{{ - #Early return + # Early return if 'StressbalanceAnalysis' not in analyses and 'ThermalAnalysis' not in analyses: return md - if solution == 'TransientSoluition' and not md.transient.isstressbalance and not md.transient.isthermal: + if solution == 'TransientSolution' and not md.transient.isstressbalance and not md.transient.isthermal: return md - md = checkfield(md, 'fieldname', 'friction.coefficient', 'timeseries', 1, 'NaN', 1, 'Inf', 1) md = checkfield(md, 'fieldname', 'friction.q', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofelements]) md = checkfield(md, 'fieldname', 'friction.p', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofelements]) @@ -76,13 +72,11 @@ md = checkfield(md, 'fieldname', 'friction.effective_pressure', 'NaN', 1, 'Inf', 1, 'timeseries', 1) elif self.coupling > 4: raise ValueError('not supported yet') - return md # }}} def marshall(self, prefix, md, fid): # {{{ WriteData(fid, prefix, 'name', 'md.friction.law', 'data', 1, 'format', 'Integer') - if type(self.coefficient) in [np.ndarray] and (self.coefficient.shape[0] == md.mesh.numberofvertices or self.coefficient.shape[0] == (md.mesh.numberofvertices + 1)): mattype = 1 tsl = md.mesh.numberofvertices @@ -89,7 +83,6 @@ else: mattype = 2 tsl = md.mesh.numberofelements - WriteData(fid, prefix, 'object', self, 'fieldname', 'coefficient', 'format', 'DoubleMat', 'mattype', mattype, 'timeserieslength', tsl + 1, 'yts', md.constants.yts) WriteData(fid, prefix, 'object', self, 'fieldname', 'p', 'format', 'DoubleMat', 'mattype', 2) WriteData(fid, prefix, 'object', self, 'fieldname', 'q', 'format', 'DoubleMat', 'mattype', 2) Index: ../trunk-jpl/src/m/classes/dslmme.py =================================================================== --- ../trunk-jpl/src/m/classes/dslmme.py (revision 25687) +++ ../trunk-jpl/src/m/classes/dslmme.py (revision 25688) @@ -7,12 +7,11 @@ class dslmme(object): - ''' - DSLMME class definition + """DSLMME class definition - Usage: - dsl = dslmme() #dynamic sea level class based on a multi-model ensemble of CMIP5 outputs - ''' + Usage: + dsl = dslmme() #dynamic sea level class based on a multi-model ensemble of CMIP5 outputs + """ def __init__(self, *args): #{{{ self.modelid = 0 #index into the multi-model ensemble @@ -36,30 +35,25 @@ s += '{}\n'.format(fielddisplay(self, 'sea_surface_height_change_above_geoid', 'corresponds to zos fields in CMIP5 archives. Spatial average is 0. Specified as a spatio-temporally variable rate (mm/yr) for each ensemble.')) s += '{}\n'.format(fielddisplay(self, 'sea_water_pressure_change_at_sea_floor', 'corresponds to bpo fields in CMIP5 archives. 
Specified as a spatio-temporally variable rate (in mm/yr) for each ensemble.')) s += '{}\n'.format(fielddisplay(self, 'compute_fingerprints', 'do we use the sea water pressure change to compute fingerprints and correct sea_surface_height_change_above_geoid')) - return s #}}} - def setdefaultparameters(self): # {{{ - return + def setdefaultparameters(self): #{{{ + return self #}}} def checkconsistency(self, md, solution, analyses): # {{{ - if ('SealevelriseAnalysis' not in analyses) or (solution == 'TransientSolution' and md.transient.isslr == 0): + if ('SealevelriseAnalysis' not in analyses) or (solution == 'TransientSolution' and not md.transient.isslr): return md - for i in range(len(self.global_average_thermosteric_sea_level_change)): md = checkfield(md, 'field', self.global_average_thermosteric_sea_level_change[i], 'NaN', 1, 'Inf', 1) md = checkfield(md, 'field', self.sea_surface_height_change_above_geoid[i], 'NaN', 1, 'Inf', 1, 'timeseries', 1) md = checkfield(md, 'field', self.sea_water_pressure_change_at_sea_floor[i], 'NaN', 1, 'Inf', 1, 'timeseries', 1) - md = checkfield(md, 'field', self.modelid, 'NaN', 1, 'Inf', 1, '>=', 1, '<=',len(self.global_average_thermosteric_sea_level_change)) - if self.compute_fingerprints: #check geodetic flag of slr is on - if md.solidearth.settings.computesealevelchange == 0, + if not md.solidearth.settings.computesealevelchange: raise Exception('DSL checkconsistency error message: if bottom pressure fingerprints computations are requested, slr class should have geodetic flag on') - return md #}}} Index: ../trunk-jpl/src/m/classes/initialization.py =================================================================== --- ../trunk-jpl/src/m/classes/initialization.py (revision 25687) +++ ../trunk-jpl/src/m/classes/initialization.py (revision 25688) @@ -1,13 +1,13 @@ import numpy as np + +from checkfield import checkfield +from fielddisplay import fielddisplay from project3d import project3d -from fielddisplay import fielddisplay -from checkfield import checkfield from WriteData import WriteData class initialization(object): - """ - INITIALIZATION class definition + """INITIALIZATION class definition Usage: initialization = initialization() @@ -14,45 +14,42 @@ """ def __init__(self): # {{{ + self.vx = np.nan + self.vy = np.nan + self.vz = np.nan + self.vel = np.nan + self.enthalpy = np.nan + self.pressure = np.nan + self.temperature = np.nan + self.waterfraction = np.nan + self.watercolumn = np.nan + self.sediment_head = np.nan + self.epl_head = np.nan + self.epl_thickness = np.nan + self.hydraulic_potential = np.nan + self.channelarea = np.nan - self.vx = float('NaN') - self.vy = float('NaN') - self.vz = float('NaN') - self.vel = float('NaN') - self.enthalpy = float('NaN') - self.pressure = float('NaN') - self.temperature = float('NaN') - self.waterfraction = float('NaN') - self.watercolumn = float('NaN') - self.sediment_head = float('NaN') - self.epl_head = float('NaN') - self.epl_thickness = float('NaN') - self.hydraulic_potential = float('NaN') - self.channelarea = float('NaN') - - #set defaults + #set defaults self.setdefaultparameters() - #}}} def __repr__(self): # {{{ - string = ' initial field values:' - string = "%s\n%s" % (string, fielddisplay(self, 'vx', 'x component of velocity [m / yr]')) - string = "%s\n%s" % (string, fielddisplay(self, 'vy', 'y component of velocity [m / yr]')) - string = "%s\n%s" % (string, fielddisplay(self, 'vz', 'z component of velocity [m / yr]')) - string = "%s\n%s" % (string, fielddisplay(self, 'vel', 'velocity norm [m 
/ yr]')) - string = "%s\n%s" % (string, fielddisplay(self, 'pressure', 'pressure [Pa]')) - string = "%s\n%s" % (string, fielddisplay(self, 'temperature', 'temperature [K]')) - string = "%s\n%s" % (string, fielddisplay(self, 'enthalpy', 'enthalpy [J]')) - string = "%s\n%s" % (string, fielddisplay(self, 'waterfraction', 'fraction of water in the ice')) - string = "%s\n%s" % (string, fielddisplay(self, 'watercolumn', 'thickness of subglacial water [m]')) - string = "%s\n%s" % (string, fielddisplay(self, 'sediment_head', 'sediment water head of subglacial system [m]')) - string = "%s\n%s" % (string, fielddisplay(self, 'epl_head', 'epl water head of subglacial system [m]')) - string = "%s\n%s" % (string, fielddisplay(self, 'epl_thickness', 'thickness of the epl [m]')) - string = "%s\n%s" % (string, fielddisplay(self, 'hydraulic_potential', 'Hydraulic potential (for GlaDS) [Pa]')) - string = "%s\n%s" % (string, fielddisplay(self, 'channelarea', 'subglaciale water channel area (for GlaDS) [m2]')) - - return string + s = ' initial field values:\n' + s += '{}\n'.format(fielddisplay(self, 'vx', 'x component of velocity [m / yr]')) + s += '{}\n'.format(fielddisplay(self, 'vy', 'y component of velocity [m / yr]')) + s += '{}\n'.format(fielddisplay(self, 'vz', 'z component of velocity [m / yr]')) + s += '{}\n'.format(fielddisplay(self, 'vel', 'velocity norm [m / yr]')) + s += '{}\n'.format(fielddisplay(self, 'pressure', 'pressure [Pa]')) + s += '{}\n'.format(fielddisplay(self, 'temperature', 'temperature [K]')) + s += '{}\n'.format(fielddisplay(self, 'enthalpy', 'enthalpy [J]')) + s += '{}\n'.format(fielddisplay(self, 'waterfraction', 'fraction of water in the ice')) + s += '{}\n'.format(fielddisplay(self, 'watercolumn', 'thickness of subglacial water [m]')) + s += '{}\n'.format(fielddisplay(self, 'sediment_head', 'sediment water head of subglacial system [m]')) + s += '{}\n'.format(fielddisplay(self, 'epl_head', 'epl water head of subglacial system [m]')) + s += '{}\n'.format(fielddisplay(self, 'epl_thickness', 'thickness of the epl [m]')) + s += '{}\n'.format(fielddisplay(self, 'hydraulic_potential', 'Hydraulic potential (for GlaDS) [Pa]')) + s += '{}\n'.format(fielddisplay(self, 'channelarea', 'subglaciale water channel area (for GlaDS) [m2]')) + return s #}}} def extrude(self, md): # {{{ @@ -69,11 +66,8 @@ self.epl_thickness = project3d(md, 'vector', self.epl_thickness, 'type', 'node', 'layer', 1) #Lithostatic pressure by default - # self.pressure = md.constants.g * md.materials.rho_ice * (md.geometry.surface[:, 0] - md.mesh.z) - #self.pressure = md.constants.g * md.materials.rho_ice * (md.geometry.surface-md.mesh.z.reshape(-1, )) - if np.ndim(md.geometry.surface) == 2: - print('Reshaping md.geometry.surface for you convenience but you should fix it in you files') + print('Reshaping md.geometry.surface for your convenience but you should fix it in your model set up') self.pressure = md.constants.g * md.materials.rho_ice * (md.geometry.surface.reshape(-1, ) - md.mesh.z) else: self.pressure = md.constants.g * md.materials.rho_ice * (md.geometry.surface - md.mesh.z) @@ -86,21 +80,21 @@ #}}} def checkconsistency(self, md, solution, analyses): # {{{ - if 'StressbalanceAnalysis' in analyses: + if 'StressbalanceAnalysis' in analyses and not solution == 'TransientSolution' and not md.transient.isstressbalance: if not np.any(np.logical_or(np.isnan(md.initialization.vx), np.isnan(md.initialization.vy))): md = checkfield(md, 'fieldname', 'initialization.vx', 'NaN', 1, 'Inf', 1, 'size', 
[md.mesh.numberofvertices]) md = checkfield(md, 'fieldname', 'initialization.vy', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices]) - if 'MasstransportAnalysis' in analyses: + if 'MasstransportAnalysis' in analyses and not solution == 'TransientSolution' and not md.transient.ismasstransport: md = checkfield(md, 'fieldname', 'initialization.vx', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices]) md = checkfield(md, 'fieldname', 'initialization.vy', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices]) - if 'BalancethicknessAnalysis' in analyses: + if 'BalancethicknessAnalysis' in analyses and solution == 'BalancethicknessSolution': md = checkfield(md, 'fieldname', 'initialization.vx', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices]) md = checkfield(md, 'fieldname', 'initialization.vy', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices]) - #Triangle with zero velocity + # Triangle with zero velocity if np.any(np.logical_and(np.sum(np.abs(md.initialization.vx[md.mesh.elements - 1]), axis=1) == 0, np.sum(np.abs(md.initialization.vy[md.mesh.elements - 1]), axis=1) == 0)): md.checkmessage("at least one triangle has all its vertices with a zero velocity") - if 'ThermalAnalysis' in analyses and not (solution == 'TransientSolution' and md.transient.isthermal == 0): + if 'ThermalAnalysis' in analyses and not solution == 'TransientSolution' and not md.transient.isthermal: md = checkfield(md, 'fieldname', 'initialization.vx', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices]) md = checkfield(md, 'fieldname', 'initialization.vy', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices]) md = checkfield(md, 'fieldname', 'initialization.temperature', 'NaN', 1, 'Inf', 1, 'size', 'universal') @@ -107,12 +101,12 @@ if md.mesh.dimension() == 3: md = checkfield(md, 'fieldname', 'initialization.vz', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices]) md = checkfield(md, 'fieldname', 'initialization.pressure', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices]) - if ('EnthalpyAnalysis' in analyses and md.thermal.isenthalpy): - md = checkfield(md, 'fieldname', 'initialization.waterfraction', '>=', 0, 'size', [md.mesh.numberofvertices]) - md = checkfield(md, 'fieldname', 'initialization.watercolumn', '>=', 0, 'size', [md.mesh.numberofvertices]) - pos = np.nonzero(md.initialization.waterfraction > 0.)[0] - if(pos.size): - md = checkfield(md, 'fieldname', 'delta Tpmp', 'field', np.absolute(md.initialization.temperature[pos] - (md.materials.meltingpoint - md.materials.beta * md.initialization.pressure[pos])), '<', 1e-11, 'message', 'set temperature to pressure melting point at locations with waterfraction > 0') + if 'EnthalpyAnalysis' in analyses and md.thermal.isenthalpy: + md = checkfield(md, 'fieldname', 'initialization.waterfraction', '>=', 0, 'size', [md.mesh.numberofvertices]) + md = checkfield(md, 'fieldname', 'initialization.watercolumn', '>=', 0, 'size', [md.mesh.numberofvertices]) + pos = np.nonzero(md.initialization.waterfraction > 0.)[0] + if(pos.size): + md = checkfield(md, 'fieldname', 'delta Tpmp', 'field', np.absolute(md.initialization.temperature[pos] - (md.materials.meltingpoint - md.materials.beta * md.initialization.pressure[pos])), '<', 1e-11, 'message', 'set temperature to pressure melting point at locations with waterfraction > 0') if 'HydrologyShreveAnalysis' in analyses: if hasattr(md.hydrology, 'hydrologyshreve'): md = checkfield(md, 'fieldname', 'initialization.watercolumn', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices]) @@ -133,9 +127,7 @@ # }}} 
def marshall(self, prefix, md, fid): # {{{ - yts = md.constants.yts - WriteData(fid, prefix, 'object', self, 'fieldname', 'vx', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1. / yts) WriteData(fid, prefix, 'object', self, 'fieldname', 'vy', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1. / yts) WriteData(fid, prefix, 'object', self, 'fieldname', 'vz', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1. / yts) Index: ../trunk-jpl/src/m/classes/SMBforcing.py =================================================================== --- ../trunk-jpl/src/m/classes/SMBforcing.py (revision 25687) +++ ../trunk-jpl/src/m/classes/SMBforcing.py (revision 25688) @@ -1,41 +1,46 @@ import numpy as np + +from checkfield import checkfield from fielddisplay import fielddisplay -from checkfield import checkfield +from project3d import project3d from WriteData import WriteData -from project3d import project3d class SMBforcing(object): - """ - SMBforcing Class definition + """SMBFORCING class definition - Usage: - SMB = SMBforcing() + Usage: + SMB = SMBforcing() """ - def __init__(self): # {{{ - self.mass_balance = float('NaN') - self.requested_outputs = [] + def __init__(self, *args): # {{{ self.isclimatology = 0 + self.mass_balance = np.nan self.steps_per_step = 1 + self.requested_outputs = [] self.averaging = 0 + + nargs = len(args) + if nargs == 0: + pass + else: + raise Exception('constructor not supported') #}}} def __repr__(self): # {{{ - string = " surface forcings parameters:" - string = "%s\n%s" % (string, fielddisplay(self, 'mass_balance', 'surface mass balance [m/yr ice eq]')) - string = "%s\n%s" % (string, fielddisplay(self, 'isclimatology', 'repeat all forcings when past last forcing time (default false)')) - string = "%s\n%s" % (string, fielddisplay(self, 'steps_per_step', 'number of smb steps per time step')) - string = "%s\n%s" % (string, fielddisplay(self, 'averaging', 'averaging methods from short to long steps')) - string = "%s\n\t\t%s" % (string, '0: Arithmetic (default)') - string = "%s\n\t\t%s" % (string, '1: Geometric') - string = "%s\n\t\t%s" % (string, '2: Harmonic') - string = "%s\n%s" % (string, fielddisplay(self, 'requested_outputs', 'additional outputs requested')) - return string + s = ' surface forcings parameters:\n' + s += '{}\n'.format(fielddisplay(self, 'mass_balance', 'surface mass balance [m/yr ice eq]')) + s += '{}\n'.format(fielddisplay(self, 'isclimatology', 'repeat all forcings when past last forcing time (default false)')) + s += '{}\n'.format(fielddisplay(self, 'steps_per_step', 'number of smb steps per time step')) + s += '{}\n'.format(fielddisplay(self, 'averaging', 'averaging methods from short to long steps')) + s += '\t\t{}\n'.format('0: Arithmetic (default)') + s += '\t\t{}\n'.format('1: Geometric') + s += '\t\t{}\n'.format('2: Harmonic') + s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested')) + return s #}}} def extrude(self, md): # {{{ - self.mass_balance = project3d(md, 'vector', self.mass_balance, 'type', 'node') return self #}}} @@ -48,24 +53,22 @@ if np.all(np.isnan(self.mass_balance)): self.mass_balance = np.zeros((md.mesh.numberofvertices)) print(" no SMBforcing.mass_balance specified: values set as zero") - return self #}}} def checkconsistency(self, md, solution, analyses): # {{{ + if solution == 'TransientSolution' and not md.transient.issmb: + return if 'MasstransportAnalysis' in analyses: md = checkfield(md, 'fieldname', 'smb.mass_balance', 'timeseries', 1, 'NaN', 1, 'Inf', 1) - if 'BalancethicknessAnalysis' in 
analyses: md = checkfield(md, 'fieldname', 'smb.mass_balance', 'size', [md.mesh.numberofvertices], 'NaN', 1, 'Inf', 1) - md = checkfield(md, 'fieldname', 'smb.steps_per_step', '>=', 1, 'numel', [1]) md = checkfield(md, 'fieldname', 'smb.averaging', 'numel', [1], 'values', [0, 1, 2]) md = checkfield(md, 'fieldname', 'masstransport.requested_outputs', 'stringrow', 1) md = checkfield(md, 'fieldname', 'smb.isclimatology', 'values', [0, 1]) - if (self.isclimatology > 0): + if self.isclimatology: md = checkfield(md, 'fieldname', 'smb.mass_balance', 'size', [md.mesh.numberofvertices + 1], 'message', 'mass_balance must have md.mesh.numberofvertices + 1 rows in order to force a climatology') - return md # }}} @@ -84,5 +87,4 @@ outputs = outputscopy WriteData(fid, prefix, 'data', outputs, 'name', 'md.smb.requested_outputs', 'format', 'StringArray') WriteData(fid, prefix, 'object', self, 'class', 'smb', 'fieldname', 'isclimatology', 'format', 'Boolean') - # }}} Index: ../trunk-jpl/src/m/classes/spatiallinearbasalforcings.m =================================================================== --- ../trunk-jpl/src/m/classes/spatiallinearbasalforcings.m (revision 25687) +++ ../trunk-jpl/src/m/classes/spatiallinearbasalforcings.m (revision 25688) @@ -80,7 +80,7 @@ end end % }}} function disp(self) % {{{ - disp(sprintf(' basal forcings parameters:')); + disp(sprintf(' spatial linear basal forcings parameters:')); fielddisplay(self,'groundedice_melting_rate','basal melting rate (positive if melting) [m/yr]'); fielddisplay(self,'deepwater_melting_rate','basal melting rate (positive if melting applied for floating ice whith base < deepwater_elevation) [m/yr]'); @@ -99,12 +99,12 @@ pos=find(md.geometry.base>md.basalforcings.deepwater_elevation & md.geometry.base 2: raise ValueError('not supported yet') - return self #}}} def checkconsistency(self, md, solution, analyses): # {{{ - #Early return if 'StressbalanceAnalysis' not in analyses and 'ThermalAnalysis' not in analyses: return md - md = checkfield(md, 'fieldname', 'friction.coefficient', 'timeseries', 1, 'NaN', 1, 'Inf', 1) md = checkfield(md, 'fieldname', 'friction.coefficientcoulomb', 'timeseries', 1, 'NaN', 1, 'Inf', 1) md = checkfield(md, 'fieldname', 'friction.q', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofelements]) @@ -76,7 +72,6 @@ raise ValueError('not implemented yet') elif self.coupling > 2: raise ValueError('not supported yet') - return md # }}} Index: ../trunk-jpl/src/m/classes/spatiallinearbasalforcings.py =================================================================== --- ../trunk-jpl/src/m/classes/spatiallinearbasalforcings.py (nonexistent) +++ ../trunk-jpl/src/m/classes/spatiallinearbasalforcings.py (revision 25688) @@ -0,0 +1,110 @@ +import numpy as np + +from checkfield import checkfield +from fielddisplay import fielddisplay +from project3d import project3d +from structtoobj import structtoobj +from WriteData import WriteData + + +class spatiallinearbasalforcings(object): + """SPATIAL LINEAR BASAL FORCINGS class definition + + Usage: + spatiallinearbasalforcings = spatiallinearbasalforcings() + """ + + def __init__(self, *args): #{{{ + nargs = len(args) + if nargs == 0: + self.groundedice_melting_rate = np.nan + self.deepwater_melting_rate = np.nan + self.deepwater_elevation = np.nan + self.upperwater_elevation = np.nan + self.geothermalflux = np.nan + + self.setdefaultparameters() + elif nargs == 1: + lb = args[0] + if lb.__module__ == 'linearbasalforcings': + nvertices = len(lb.groundedice_melting_rate) +
self.groundedice_melting_rate = lb.groundedice_melting_rate + self.deepwater_melting_rate = lb.deepwater_melting_rate * np.ones((nvertices, )) + self.deepwater_elevation = lb.deepwater_elevation * np.ones((nvertices, )) + self.upperwater_elevation = lb.upperwater_elevation * np.ones((nvertices, )) + self.geothermalflux = lb.geothermalflux + else: + # TODO: This has not been tested + self = structtoobj(self, lb) + else: + raise Exception('constructor not supported') + #}}} + + def __repr__(self): #{{{ + s = ' spatial linear basal forcings parameters:\n' + s += '{}\n'.format(fielddisplay(self, 'groundedice_melting_rate', 'basal melting rate (positive if melting) [m/yr]')) + s += '{}\n'.format(fielddisplay(self, 'deepwater_melting_rate', 'basal melting rate (positive if melting applied for floating ice whith base < deepwater_elevation) [m/yr]')) + s += '{}\n'.format(fielddisplay(self, 'deepwater_elevation', 'elevation of ocean deepwater [m]')) + s += '{}\n'.format(fielddisplay(self, 'upperwater_elevation', 'elevation of ocean upperwater [m]')) + s += '{}\n'.format(fielddisplay(self, 'geothermalflux', 'geothermal heat flux [W/m^2]')) + return s + #}}} + + def extrude(self, md): #{{{ + self.groundedice_melting_rate = project3d(md, 'vector', self.groundedice_melting_rate, 'type', 'node', 'layer', 1) + self.deepwater_melting_rate = project3d(md, 'vector', self.deepwater_melting_rate, 'type', 'node', 'layer', 1) + self.deepwater_elevation = project3d(md, 'vector', self.deepwater_elevation, 'type', 'node', 'layer', 1) + self.upperwater_elevation = project3d(md, 'vector', self.upperwater_elevation, 'type', 'node', 'layer', 1) + self.geothermalflux = project3d(md, 'vector', self.geothermalflux, 'type', 'node', 'layer', 1) # Bedrock only gets geothermal flux + return self + #}}} + + def initialize(self, md): #{{{ + if np.all(np.isnan(self.groundedice_melting_rate)): + self.groundedice_melting_rate = np.zeros((md.mesh.numberofvertices)) + print(' no basalforcings.groundedice_melting_rate specified: values set as zero') + return self + #}}} + + def setdefaultparameters(self): #{{{ + return self + #}}} + + def checkconsistency(self, md, solution, analyses): #{{{ + if 'MasstransportAnalysis' in analyses and not solution == 'TransientSolution' and not md.transient.ismasstransport: + md = checkfield(md, 'fieldname', 'basalforcings.groundedice_melting_rate', 'NaN', 1, 'Inf', 1, 'timeseries', 1) + md = checkfield(md, 'fieldname', 'basalforcings.deepwater_melting_rate', 'NaN', 1, 'Inf', 1, 'timeseries', 1, '>=', 0) + md = checkfield(md, 'fieldname', 'basalforcings.deepwater_elevation', 'NaN', 1, 'Inf', 1, 'timeseries', 1) + md = checkfield(md, 'fieldname', 'basalforcings.upperwater_elevation', 'NaN', 1, 'Inf', 1, 'timeseries', 1, '<', 0) + if 'BalancethicknessAnalysis' in analyses: + raise Exception('not implemented yet!') + md = checkfield(md, 'fieldname', 'basalforcings.groundedice_melting_rate', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices]) + md = checkfield(md, 'fieldname', 'basalforcings.deepwater_melting_rate', '>=', 0, 'numel', 1) + md = checkfield(md, 'fieldname', 'basalforcings.deepwater_elevation', '<', 'basalforcings.upperwater_elevation', 'numel', 1) + md = checkfield(md, 'fieldname', 'basalforcings.upperwater_elevation', '<=', 0, 'numel', 1) + if 'ThermalAnalysis' in analyses and not solution == 'TransientSolution' and not md.transient.isthermal: + raise Exception('not implemented yet!') + md = checkfield(md, 'fieldname', 'basalforcings.groundedice_melting_rate', 'NaN', 1, 'Inf', 1,
'timeseries', 1) + md = checkfield(md, 'fieldname', 'basalforcings.deepwater_melting_rate', '>=', 0, 'numel', 1) + md = checkfield(md, 'fieldname', 'basalforcings.deepwater_elevation', '<', 'basalforcings.upperwater_elevation', 'numel', 1) + md = checkfield(md, 'fieldname', 'basalforcings.upperwater_elevation', '<', 0, 'numel', 1) + md = checkfield(md, 'fieldname', 'basalforcings.geothermalflux', 'NaN', 1, 'Inf', 1, 'timeseries', 1, '>=', 0) + return md + # }}} + + def marshall(self, prefix, md, fid): #{{{ + yts = md.constants.yts + + floatingice_melting_rate = np.zeros((md.mesh.numberofvertices, )) + pos = np.where(md.geometry.base <= md.basalforcings.deepwater_elevation)[0] + floatingice_melting_rate[pos] = md.basalforcings.deepwater_melting_rate[pos] + pos = np.where(np.logical_and.reduce((md.geometry.base > md.basalforcings.deepwater_elevation, md.geometry.base < md.basalforcings.upperwater_elevation))) + floatingice_melting_rate[pos] = md.basalforcings.deepwater_melting_rate[pos] * (md.geometry.base[pos] - md.basalforcings.upperwater_elevation[pos]) / (md.basalforcings.deepwater_elevation[pos] - md.basalforcings.upperwater_elevation[pos]) + WriteData(fid, prefix, 'name', 'md.basalforcings.model', 'data', 6, 'format', 'Integer') + WriteData(fid, prefix, 'data', floatingice_melting_rate, 'format', 'DoubleMat', 'name', 'md.basalforcings.floatingice_melting_rate', 'mattype', 1, 'scale', 1. / yts, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', md.constants.yts) + WriteData(fid, prefix, 'object', self, 'fieldname', 'groundedice_melting_rate', 'format', 'DoubleMat', 'name', 'md.basalforcings.groundedice_melting_rate', 'mattype', 1, 'scale', 1. / yts, 'timeserieslength', md.mesh.numberofvertices + 1,'yts', md.constants.yts) + WriteData(fid, prefix,'object', self, 'fieldname', 'geothermalflux', 'name', 'md.basalforcings.geothermalflux', 'format', 'DoubleMat', 'mattype', 1,'timeserieslength', md.mesh.numberofvertices + 1, 'yts', md.constants.yts) + WriteData(fid, prefix, 'object', self, 'fieldname', 'deepwater_melting_rate', 'format', 'DoubleMat', 'name', 'md.basalforcings.deepwater_melting_rate', 'scale', 1.
/ yts, 'mattype', 1) + WriteData(fid, prefix, 'object', self, 'fieldname', 'deepwater_elevation', 'format', 'DoubleMat', 'name', 'md.basalforcings.deepwater_elevation', 'mattype', 1) + WriteData(fid, prefix, 'object', self, 'fieldname', 'upperwater_elevation', 'format', 'DoubleMat', 'name', 'md.basalforcings.upperwater_elevation', 'mattype', 1) + #}}} Index: ../trunk-jpl/src/m/classes/qmu/normal_uncertain.m =================================================================== --- ../trunk-jpl/src/m/classes/qmu/normal_uncertain.m (revision 25687) +++ ../trunk-jpl/src/m/classes/qmu/normal_uncertain.m (revision 25688) @@ -160,7 +160,7 @@ scale=[]; end % }}} %new methods: - function distributed=isdistributed(self) % {{{ + function distributed=isdistributed(self) % {{{ if strncmp(self.descriptor,'distributed_',12), distributed=1; else @@ -167,7 +167,7 @@ distributed=0; end end % }}} - function scaled=isscaled(self) % {{{ + function scaled=isscaled(self) % {{{ if strncmp(self.descriptor,'scaled_',7), scaled=1; else Index: ../trunk-jpl/src/m/classes/qmu/response_function.m =================================================================== --- ../trunk-jpl/src/m/classes/qmu/response_function.m (revision 25687) +++ ../trunk-jpl/src/m/classes/qmu/response_function.m (revision 25688) @@ -115,13 +115,13 @@ grell=allempty(grell); end % }}} %new methods: - function scaled =isscaled(self) % {{{ - if strncmp(self.descriptor,'scaled_',7), - scaled=1; - else - scaled=0; - end - end % }}} + function scaled =isscaled(self) % {{{ + if strncmp(self.descriptor,'scaled_',7), + scaled=1; + else + scaled=0; + end + end % }}} end methods (Static) function [rdesc]=dakota_write(fidi,dresp,rdesc) % {{{ Index: ../trunk-jpl/src/m/classes/qmu/normal_uncertain.py =================================================================== --- ../trunk-jpl/src/m/classes/qmu/normal_uncertain.py (revision 25687) +++ ../trunk-jpl/src/m/classes/qmu/normal_uncertain.py (revision 25688) @@ -9,8 +9,7 @@ class normal_uncertain(object): - ''' - NORMAL_UNCERTAIN class definition + """NORMAL_UNCERTAIN class definition Usage: [nuv] = normal_uncertain( @@ -37,7 +36,8 @@ 'stddev',.05, 'partition',vpartition ) - ''' + """ + def __init__(self): #{{{ self.descriptor = '' self.mean = np.nan @@ -230,6 +230,13 @@ #}}} #new methods: + def isdistributed(self): #{{{ + if strncmp(self.descriptor, 'distributed_', 12): + return True + else: + return False + #}}} + def isscaled(self): #{{{ if strncmp(self.descriptor, 'scaled_', 7): return True Index: ../trunk-jpl/src/m/classes/giaivins.m =================================================================== --- ../trunk-jpl/src/m/classes/giaivins.m (revision 25687) +++ ../trunk-jpl/src/m/classes/giaivins.m (revision 25688) @@ -34,7 +34,7 @@ %figure out if thickness is a transient forcing: if size(md.geometry.thickness,1)==md.mesh.numberofvertices+1, %recover the furthest time "in time": - if(thickness(end,end)~=md.timestepping.start_time), + if(md.geometry.thickness(end,end)~=md.timestepping.start_time), md = checkmessage(md,['if ismasstransport is on, transient thickness forcing'... ' for the giaivins model should not be provided in the future.'... ' Synchronize your start_time to correspond to the most recent transient'... @@ -50,7 +50,7 @@ fielddisplay(self,'mantle_viscosity','mantle viscosity[Pa s]'); fielddisplay(self,'lithosphere_thickness','lithosphere thickness (km)'); fielddisplay(self,'cross_section_shape','1: square-edged (default). 2: elliptical. 
See iedge in GiaDeflectionCore'); - +True end % }}} function marshall(self,prefix,md,fid) % {{{ WriteData(fid,prefix,'name','md.gia.model','data',1,'format','Integer'); Index: ../trunk-jpl/src/m/classes/geometry.py =================================================================== --- ../trunk-jpl/src/m/classes/geometry.py (revision 25687) +++ ../trunk-jpl/src/m/classes/geometry.py (revision 25688) @@ -7,14 +7,13 @@ class geometry(object): - ''' - GEOMETRY class definition + """GEOMETRY class definition - Usage: - geometry = geometry() - ''' + Usage: + geometry = geometry() + """ - def __init__(self): #{{{ + def __init__(self, *args): #{{{ self.surface = np.nan self.thickness = np.nan self.base = np.nan @@ -21,17 +20,19 @@ self.bed = np.nan self.hydrostatic_ratio = np.nan - #set defaults - self.setdefaultparameters() + nargs = len(args) + if nargs == 0: + self.setdefaultparameters() + else: + raise Exception('constructor not supported') #}}} def __repr__(self): #{{{ - s = " geometry parameters:" - s = "%s\n%s" % (s, fielddisplay(self, 'surface', 'ice upper surface elevation [m]')) - s = "%s\n%s" % (s, fielddisplay(self, 'thickness', 'ice thickness [m]')) - s = "%s\n%s" % (s, fielddisplay(self, 'base', 'ice base elevation [m]')) - s = "%s\n%s" % (s, fielddisplay(self, 'bed', 'bed elevation [m]')) - + s = ' geometry parameters:\n' + s += '{}\n'.format(fielddisplay(self, 'surface', 'ice upper surface elevation [m]')) + s += '{}\n'.format(fielddisplay(self, 'thickness', 'ice thickness [m]')) + s += '{}\n'.format(fielddisplay(self, 'base', 'ice base elevation [m]')) + s += '{}\n'.format(fielddisplay(self, 'bed', 'bed elevation [m]')) return s #}}} Index: ../trunk-jpl/src/m/classes/flowequation.py =================================================================== --- ../trunk-jpl/src/m/classes/flowequation.py (revision 25687) +++ ../trunk-jpl/src/m/classes/flowequation.py (revision 25688) @@ -22,7 +22,7 @@ self.isHO = 0 self.isFS = 0 self.isNitscheBC = 0 - self.FSNitscheGamma = 1e-6 + self.FSNitscheGamma = 1e6 self.fe_SSA = '' self.fe_HO = '' self.fe_FS = '' @@ -31,67 +31,61 @@ self.augmented_lagrangian_rlambda = 1. self.augmented_lagrangian_rholambda = 1. self.XTH_theta = 0. 
- self.vertex_equation = float('NaN') - self.element_equation = float('NaN') - self.borderSSA = float('NaN') - self.borderHO = float('NaN') - self.borderFS = float('NaN') - - #set defaults + self.vertex_equation = np.nan + self.element_equation = np.nan + self.borderSSA = np.nan + self.borderHO = np.nan + self.borderFS = np.nan + self.setdefaultparameters() - #}}} def __repr__(self): # {{{ s = ' flow equation parameters:\n' - s += "{}\n".format(fielddisplay(self, 'isSIA', "is the Shallow Ice Approximation (SIA) used?")) - s += "{}\n".format(fielddisplay(self, 'isSSA', "is the Shelfy-Stream Approximation (SSA) used?")) - s += "{}\n".format(fielddisplay(self, 'isL1L2', "are L1L2 equations used?")) - s += "{}\n".format(fielddisplay(self, 'isMLHO', "are Mono-layer Higher-Order equations used?")) - s += "{}\n".format(fielddisplay(self, 'isHO', "is the Higher-Order (HO) approximation used?")) - s += "{}\n".format(fielddisplay(self, 'isFS', "are the Full-FS (FS) equations used?")) - s += "{}\n".format(fielddisplay(self, 'isNitscheBC', "is weakly imposed condition used?")) - s += "{}\n".format(fielddisplay(self, 'FSNitscheGamma', "Gamma value for the Nitsche term (default: 1e6)")) - s += "{}\n".format(fielddisplay(self, 'fe_SSA', "Finite Element for SSA: 'P1', 'P1bubble' 'P1bubblecondensed' 'P2'")) - s += "{}\n".format(fielddisplay(self, 'fe_HO', "Finite Element for HO: 'P1', 'P1bubble', 'P1bubblecondensed', 'P1xP2', 'P2xP1', 'P2', 'P2bubble', 'P1xP3', 'P2xP4'")) - s += "{}\n".format(fielddisplay(self, 'fe_FS', "Finite Element for FS: 'P1P1' (debugging only) 'P1P1GLS' 'MINIcondensed' 'MINI' 'TaylorHood' 'LATaylorHood' 'XTaylorHood'")) - s += "{}\n".format(fielddisplay(self, 'vertex_equation', "flow equation for each vertex")) - s += "{}\n".format(fielddisplay(self, 'element_equation', "flow equation for each element")) - s += "{}\n".format(fielddisplay(self, 'borderSSA', "vertices on SSA's border (for tiling)")) - s += "{}\n".format(fielddisplay(self, 'borderHO', "vertices on HO's border (for tiling)")) - s += "{}".format(fielddisplay(self, 'borderFS', "vertices on FS' border (for tiling)")) - + s += '{}\n'.format(fielddisplay(self, 'isSIA', "is the Shallow Ice Approximation (SIA) used?")) + s += '{}\n'.format(fielddisplay(self, 'isSSA', "is the Shelfy-Stream Approximation (SSA) used?")) + s += '{}\n'.format(fielddisplay(self, 'isL1L2', "are L1L2 equations used?")) + s += '{}\n'.format(fielddisplay(self, 'isMLHO', "are Mono-layer Higher-Order equations used?")) + s += '{}\n'.format(fielddisplay(self, 'isHO', "is the Higher-Order (HO) approximation used?")) + s += '{}\n'.format(fielddisplay(self, 'isFS', "are the Full-FS (FS) equations used?")) + s += '{}\n'.format(fielddisplay(self, 'isNitscheBC', "is weakly imposed condition used?")) + s += '{}\n'.format(fielddisplay(self, 'FSNitscheGamma', "Gamma value for the Nitsche term (default: 1e6)")) + s += '{}\n'.format(fielddisplay(self, 'fe_SSA', "Finite Element for SSA: 'P1', 'P1bubble' 'P1bubblecondensed' 'P2'")) + s += '{}\n'.format(fielddisplay(self, 'fe_HO', "Finite Element for HO: 'P1', 'P1bubble', 'P1bubblecondensed', 'P1xP2', 'P2xP1', 'P2', 'P2bubble', 'P1xP3', 'P2xP4'")) + s += '{}\n'.format(fielddisplay(self, 'fe_FS', "Finite Element for FS: 'P1P1' (debugging only) 'P1P1GLS' 'MINIcondensed' 'MINI' 'TaylorHood' 'LATaylorHood' 'XTaylorHood'")) + s += '{}\n'.format(fielddisplay(self, 'vertex_equation', "flow equation for each vertex")) + s += '{}\n'.format(fielddisplay(self, 'element_equation', "flow equation for each element")) + s += 
'{}\n'.format(fielddisplay(self, 'borderSSA', "vertices on SSA's border (for tiling)")) + s += '{}\n'.format(fielddisplay(self, 'borderHO', "vertices on HO's border (for tiling)")) + s += '{}\n'.format(fielddisplay(self, 'borderFS', "vertices on FS' border (for tiling)")) return s #}}} - def extrude(self, md): # {{{ - self.element_equation = project3d(md, 'vector', self.element_equation, 'type', 'element') - self.vertex_equation = project3d(md, 'vector', self.vertex_equation, 'type', 'node') - self.borderSSA = project3d(md, 'vector', self.borderSSA, 'type', 'node') - self.borderHO = project3d(md, 'vector', self.borderHO, 'type', 'node') - self.borderFS = project3d(md, 'vector', self.borderFS, 'type', 'node') - return self - #}}} - def setdefaultparameters(self): # {{{ - #P1 for SSA + # P1 for SSA self.fe_SSA = 'P1' - #P1 for HO + # P1 for HO self.fe_HO = 'P1' - #MINI condensed element for FS by default + # MINI condensed element for FS by default self.fe_FS = 'MINIcondensed' + return self + #}}} + def extrude(self, md): # {{{ + self.element_equation = project3d(md, 'vector', self.element_equation, 'type', 'element') + self.vertex_equation = project3d(md, 'vector', self.vertex_equation, 'type', 'node') + self.borderSSA = project3d(md, 'vector', self.borderSSA, 'type', 'node') + self.borderHO = project3d(md, 'vector', self.borderHO, 'type', 'node') + self.borderFS = project3d(md, 'vector', self.borderFS, 'type', 'node') return self #}}} def checkconsistency(self, md, solution, analyses): # {{{ - - #Early return + # Early return if ('StressbalanceAnalysis' not in analyses and 'StressbalanceSIAAnalysis' not in analyses) or (solution == 'TransientSolution' and not md.transient.isstressbalance): return md - md = checkfield(md, 'fieldname', 'flowequation.isSIA', 'numel', [1], 'values', [0, 1]) md = checkfield(md, 'fieldname', 'flowequation.isSSA', 'numel', [1], 'values', [0, 1]) md = checkfield(md, 'fieldname', 'flowequation.isL1L2', 'numel', [1], 'values', [0, 1]) @@ -121,15 +115,13 @@ md = checkfield(md, 'fieldname', 'flowequation.vertex_equation', 'size', [md.mesh.numberofvertices], 'values', np.arange(0, 9 + 1)) md = checkfield(md, 'fieldname', 'flowequation.element_equation', 'size', [md.mesh.numberofelements], 'values', np.arange(0, 9 + 1)) else: - raise RuntimeError('mesh type not supported yet') + raise RuntimeError('Case not supported yet') if not (self.isSIA or self.isSSA or self.isL1L2 or self.isMLHO or self.isHO or self.isFS): md.checkmessage("no element types set for this model") - if 'StressbalanceSIAAnalysis' in analyses: if any(self.element_equation == 1): if np.any(np.logical_and(self.vertex_equation, md.mask.ocean_levelset)): print("\n !!! 
Warning: SIA's model is not consistent on ice shelves !!!\n") - return md # }}} @@ -153,8 +145,7 @@ WriteData(fid, prefix, 'object', self, 'fieldname', 'borderSSA', 'format', 'DoubleMat', 'mattype', 1) WriteData(fid, prefix, 'object', self, 'fieldname', 'borderHO', 'format', 'DoubleMat', 'mattype', 1) WriteData(fid, prefix, 'object', self, 'fieldname', 'borderFS', 'format', 'DoubleMat', 'mattype', 1) - #convert approximations to enums + # Convert approximations to enums WriteData(fid, prefix, 'data', self.vertex_equation, 'name', 'md.flowequation.vertex_equation', 'format', 'DoubleMat', 'mattype', 1) WriteData(fid, prefix, 'data', self.element_equation, 'name', 'md.flowequation.element_equation', 'format', 'DoubleMat', 'mattype', 2) - # }}} Index: ../trunk-jpl/src/m/classes/plumebasalforcings.py =================================================================== --- ../trunk-jpl/src/m/classes/plumebasalforcings.py (revision 25687) +++ ../trunk-jpl/src/m/classes/plumebasalforcings.py (revision 25688) @@ -1,56 +1,62 @@ import numpy as np + +from checkfield import checkfield from fielddisplay import fielddisplay -from checkfield import checkfield +from project3d import project3d +from structtoobj import structtoobj from WriteData import WriteData -from project3d import project3d class plumebasalforcings(object): - ''' - PLUME BASAL FORCINGS class definition + """PLUME BASAL FORCINGS class definition - Usage: - plumebasalforcings = plumebasalforcings() - ''' + Usage: + plumebasalforcings = plumebasalforcings() + """ - def __init__(self): # {{{ - self.floatingice_melting_rate = float('NaN') - self.groundedice_melting_rate = float('NaN') - self.mantleconductivity = float('NaN') - self.nusselt = float('NaN') - self.dtbg = float('NaN') - self.plumeradius = float('NaN') - self.topplumedepth = float('NaN') - self.bottomplumedepth = float('NaN') - self.plumex = float('NaN') - self.plumey = float('NaN') - self.crustthickness = float('NaN') - self.uppercrustthickness = float('NaN') - self.uppercrustheat = float('NaN') - self.lowercrustheat = float('NaN') + def __init__(self, *args): # {{{ + self.floatingice_melting_rate = np.nan + self.groundedice_melting_rate = np.nan + self.mantleconductivity = np.nan + self.nusselt = np.nan + self.dtbg = np.nan + self.plumeradius = np.nan + self.topplumedepth = np.nan + self.bottomplumedepth = np.nan + self.plumex = np.nan + self.plumey = np.nan + self.crustthickness = np.nan + self.uppercrustthickness = np.nan + self.uppercrustheat = np.nan + self.lowercrustheat = np.nan - self.setdefaultparameters() + nargs = len(args) + if nargs == 0: + self.setdefaultparameters() + elif nargs == 1: + # TODO: This has not been tested + self = structtoobj(self, args[0]) + else: + raise Exception('constructor not supported') #}}} def __repr__(self): # {{{ - string = ' mantle plume basal melt parameterization:' - - string = "%s\n%s" % (string, fielddisplay(self, 'groundedice_melting_rate', 'basal melting rate (positive if melting) [m / yr]')) - string = "%s\n%s" % (string, fielddisplay(self, 'floatingice_melting_rate', 'basal melting rate (positive if melting) [m / yr]')) - string = "%s\n%s" % (string, fielddisplay(self, 'mantleconductivity', 'mantle heat conductivity [W / m^3]')) - string = "%s\n%s" % (string, fielddisplay(self, 'nusselt', 'nusselt number, ratio of mantle to plume [1]')) - string = "%s\n%s" % (string, fielddisplay(self, 'dtbg', 'background temperature gradient [degree / m]')) - string = "%s\n%s" % (string, fielddisplay(self, 'plumeradius', 'radius of the mantle plume [m]')) -
string = "%s\n%s" % (string, fielddisplay(self, 'topplumedepth', 'depth of the mantle plume top below the crust [m]')) - string = "%s\n%s" % (string, fielddisplay(self, 'bottomplumedepth', 'depth of the mantle plume base below the crust [m]')) - string = "%s\n%s" % (string, fielddisplay(self, 'plumex', 'x coordinate of the center of the plume [m]')) - string = "%s\n%s" % (string, fielddisplay(self, 'plumey', 'y coordinate of the center of the plume [m]')) - string = "%s\n%s" % (string, fielddisplay(self, 'crustthickness', 'thickness of the crust [m]')) - string = "%s\n%s" % (string, fielddisplay(self, 'uppercrustthickness', 'thickness of the upper crust [m]')) - string = "%s\n%s" % (string, fielddisplay(self, 'uppercrustheat', 'volumic heat of the upper crust [w / m^3]')) - string = "%s\n%s" % (string, fielddisplay(self, 'lowercrustheat', 'volumic heat of the lowercrust [w / m^3]')) - - return string + s = ' mantle plume basal melt parameterization:\n' + s += '{}\n'.format(fielddisplay(self, 'groundedice_melting_rate', 'basal melting rate (positive if melting) [m / yr]')) + s += '{}\n'.format(fielddisplay(self, 'floatingice_melting_rate', 'basal melting rate (positive if melting) [m / yr]')) + s += '{}\n'.format(fielddisplay(self, 'mantleconductivity', 'mantle heat conductivity [W / m^3]')) + s += '{}\n'.format(fielddisplay(self, 'nusselt', 'nusselt number, ratio of mantle to plume [1]')) + s += '{}\n'.format(fielddisplay(self, 'dtbg', 'background temperature gradient [degree / m]')) + s += '{}\n'.format(fielddisplay(self, 'plumeradius', 'radius of the mantle plume [m]')) + s += '{}\n'.format(fielddisplay(self, 'topplumedepth', 'depth of the mantle plume top below the crust [m]')) + s += '{}\n'.format(fielddisplay(self, 'bottomplumedepth', 'depth of the mantle plume base below the crust [m]')) + s += '{}\n'.format(fielddisplay(self, 'plumex', 'x coordinate of the center of the plume [m]')) + s += '{}\n'.format(fielddisplay(self, 'plumey', 'y coordinate of the center of the plume [m]')) + s += '{}\n'.format(fielddisplay(self, 'crustthickness', 'thickness of the crust [m]')) + s += '{}\n'.format(fielddisplay(self, 'uppercrustthickness', 'thickness of the upper crust [m]')) + s += '{}\n'.format(fielddisplay(self, 'uppercrustheat', 'volumic heat of the upper crust [w / m^3]')) + s += '{}\n'.format(fielddisplay(self, 'lowercrustheat', 'volumic heat of the lowercrust [w / m^3]')) + return s #}}} def initialize(self, md): #{{{ @@ -60,7 +66,7 @@ if np.all(np.isnan(self.floatingice_melting_rate)): self.floatingice_melting_rate = np.zeros((md.mesh.numberofvertices, )) print(' no basalforcings.floatingice_melting_rate specified: values set as zero') - return + return self #}}} def extrude(self, md): # {{{ @@ -70,7 +76,7 @@ #}}} def setdefaultparameters(self): # {{{ - #default values for melting parameterization + # Default values for melting parameterization self.mantleconductivity = 2.2 self.nusselt = 300 self.dtbg = 11 / 1000. 
Index: ../trunk-jpl/src/m/classes/friction.m =================================================================== --- ../trunk-jpl/src/m/classes/friction.m (revision 25687) +++ ../trunk-jpl/src/m/classes/friction.m (revision 25688) @@ -65,7 +65,7 @@ case 4 otherwise - error('not supported yet'); + error('not supported yet'); end end % }}} function disp(self) % {{{ Index: ../trunk-jpl/src/m/classes/frictionwaterlayer.py =================================================================== --- ../trunk-jpl/src/m/classes/frictionwaterlayer.py (revision 25687) +++ ../trunk-jpl/src/m/classes/frictionwaterlayer.py (revision 25688) @@ -35,7 +35,6 @@ s = "{}\n".format(fielddisplay(self, 'p', 'p exponent')) s = "{}\n".format(fielddisplay(self, 'q', 'q exponent')) s = "{}\n".format(fielddisplay(self, 'water_layer', 'water thickness at the base of the ice (m)')) - return s #}}} @@ -44,15 +43,15 @@ #}}} def checkconsistency(self, md, solution, analyses): #{{{ - #Early return - if ('StressbalanceAnalysis' not in analyses) and ('ThermalAnalysis' not in analyses): + # Early return + if 'StressbalanceAnalysis' not in analyses and 'ThermalAnalysis' not in analyses: return - md = checkfield(md, 'fieldname', 'friction.coefficient', 'timeseries', 1, 'NaN', 1, 'Inf', 1) md = checkfield(md, 'fieldname', 'friction.f', 'size', [1], 'NaN', 1, 'Inf', 1) md = checkfield(md, 'fieldname', 'friction.q', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofelements]) md = checkfield(md, 'fieldname', 'friction.p', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofelements]) md = checkfield(md, 'fieldname', 'thermal.spctemperature', 'Inf', 1, 'timeseries', 1, '>=', 0.) + return md # }}} def extrude(self, md): # {{{ @@ -60,7 +59,6 @@ self.p = project3d(md, 'vector', self.p, 'type', 'element') self.q = project3d(md, 'vector', self.q, 'type', 'element') self.water_layer = project3d(md, 'vector', self.water_layer, 'type', 'node', 'layer', 1) - return self # }}} Index: ../trunk-jpl/src/m/classes/masstransport.py =================================================================== --- ../trunk-jpl/src/m/classes/masstransport.py (revision 25687) +++ ../trunk-jpl/src/m/classes/masstransport.py (revision 25688) @@ -1,16 +1,16 @@ import numpy as np + +from checkfield import checkfield from fielddisplay import fielddisplay from project3d import project3d -from checkfield import checkfield from WriteData import WriteData class masstransport(object): - """ - MASSTRANSPORT class definition + """MASSTRANSPORT class definition - Usage: - masstransport = masstransport() + Usage: + masstransport = masstransport() """ def __init__(self): # {{{ @@ -23,21 +23,20 @@ self.penalty_factor = 0 self.requested_outputs = [] - #set defaults + # Set defaults self.setdefaultparameters() #}}} def __repr__(self): # {{{ - string = ' Masstransport solution parameters:' - string = "%s\n%s" % (string, fielddisplay(self, 'spcthickness', 'thickness constraints (NaN means no constraint) [m]')) - string = "%s\n%s" % (string, fielddisplay(self, 'isfreesurface', 'do we use free surfaces (FS only) or mass conservation')) - string = "%s\n%s" % (string, fielddisplay(self, 'min_thickness', 'minimum ice thickness allowed [m]')) - string = "%s\n%s" % (string, fielddisplay(self, 'hydrostatic_adjustment', 'adjustment of ice shelves surface and bed elevations: ''Incremental'' or ''Absolute'' ')) - string = "%s\n%s" % (string, fielddisplay(self, 'stabilization', '0: no, 1: artificial_diffusivity, 2: streamline upwinding, 3: discontinuous Galerkin, 4: Flux Correction Transport')) - string = 
"%s\n%s" % (string, fielddisplay(self, 'requested_outputs', 'additional outputs requested')) - - return string + s = ' Masstransport solution parameters:\n' + s += '{}\n'.format(fielddisplay(self, 'spcthickness', 'thickness constraints (NaN means no constraint) [m]')) + s += '{}\n'.format(fielddisplay(self, 'isfreesurface', 'do we use free surfaces (FS only) or mass conservation')) + s += '{}\n'.format(fielddisplay(self, 'min_thickness', 'minimum ice thickness allowed [m]')) + s += '{}\n'.format(fielddisplay(self, 'hydrostatic_adjustment', 'adjustment of ice shelves surface and bed elevations: ''Incremental'' or ''Absolute'' ')) + s += '{}\n'.format(fielddisplay(self, 'stabilization', '0: no, 1: artificial_diffusivity, 2: streamline upwinding, 3: discontinuous Galerkin, 4: Flux Correction Transport')) + s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested')) + return s #}}} def extrude(self, md): # {{{ @@ -50,21 +49,21 @@ #}}} def setdefaultparameters(self): # {{{ - #Type of stabilization to use 0:nothing 1:artificial_diffusivity 3:Discontinuous Galerkin + # Type of stabilization to use 0:nothing 1:artificial_diffusivity 3:Discontinuous Galerkin self.stabilization = 1 - #Factor applied to compute the penalties kappa = max(stiffness matrix) * 1.0**penalty_factor + # Factor applied to compute the penalties kappa = max(stiffness matrix) * 1.0**penalty_factor self.penalty_factor = 3 - #Minimum ice thickness that can be used + # Minimum ice thickness that can be used self.min_thickness = 1 - #Hydrostatic adjustment + # Hydrostatic adjustment self.hydrostatic_adjustment = 'Absolute' - #default output + # Default output self.requested_outputs = ['default'] return self #}}} def checkconsistency(self, md, solution, analyses): # {{{ - #Early return + # Early return if ('MasstransportAnalysis' not in analyses) or (solution == 'TransientSolution' and not md.transient.ismasstransport): return md @@ -89,7 +88,7 @@ WriteData(fid, prefix, 'object', self, 'fieldname', 'vertex_pairing', 'format', 'DoubleMat', 'mattype', 3) WriteData(fid, prefix, 'object', self, 'fieldname', 'penalty_factor', 'format', 'Double') - #process requested outputs + # Process requested outputs outputs = self.requested_outputs indices = [i for i, x in enumerate(outputs) if x == 'default'] if len(indices) > 0: Index: ../trunk-jpl/src/m/classes/inversion.py =================================================================== --- ../trunk-jpl/src/m/classes/inversion.py (revision 25687) +++ ../trunk-jpl/src/m/classes/inversion.py (revision 25688) @@ -1,94 +1,77 @@ import numpy as np + +from checkfield import checkfield +from fielddisplay import fielddisplay +from marshallcostfunctions import marshallcostfunctions from project3d import project3d -from fielddisplay import fielddisplay -from checkfield import checkfield -from WriteData import WriteData from supportedcontrols import supportedcontrols from supportedcostfunctions import supportedcostfunctions -from marshallcostfunctions import marshallcostfunctions +from WriteData import WriteData class inversion(object): - """ - INVERSION class definition + """INVERSION class definition - Usage: - inversion = inversion() + Usage: + inversion = inversion() """ def __init__(self): # {{{ self.iscontrol = 0 self.incomplete_adjoint = 0 - self.control_parameters = float('NaN') + self.control_parameters = np.nan self.nsteps = 0 - self.maxiter_per_step = float('NaN') + self.maxiter_per_step = np.nan self.cost_functions = '' - self.cost_functions_coefficients = 
float('NaN') - self.gradient_scaling = float('NaN') + self.cost_functions_coefficients = np.nan + self.gradient_scaling = np.nan self.cost_function_threshold = 0 - self.min_parameters = float('NaN') - self.max_parameters = float('NaN') - self.step_threshold = float('NaN') - self.vx_obs = float('NaN') - self.vy_obs = float('NaN') - self.vz_obs = float('NaN') - self.vel_obs = float('NaN') - self.thickness_obs = float('NaN') - self.surface_obs = float('NaN') + self.min_parameters = np.nan + self.max_parameters = np.nan + self.step_threshold = np.nan + self.vx_obs = np.nan + self.vy_obs = np.nan + self.vz_obs = np.nan + self.vel_obs = np.nan + self.thickness_obs = np.nan + self.surface_obs = np.nan - #set defaults self.setdefaultparameters() - #}}} def __repr__(self): # {{{ - string = ' inversion parameters:' - string = "%s\n%s" % (string, fielddisplay(self, 'iscontrol', 'is inversion activated?')) - string = "%s\n%s" % (string, fielddisplay(self, 'incomplete_adjoint', '1: linear viscosity, 0: non - linear viscosity')) - string = "%s\n%s" % (string, fielddisplay(self, 'control_parameters', 'ex: {''FrictionCoefficient''}, or {''MaterialsRheologyBbar''}')) - string = "%s\n%s" % (string, fielddisplay(self, 'nsteps', 'number of optimization searches')) - string = "%s\n%s" % (string, fielddisplay(self, 'cost_functions', 'indicate the type of response for each optimization step')) - string = "%s\n%s" % (string, fielddisplay(self, 'cost_functions_coefficients', 'cost_functions_coefficients applied to the misfit of each vertex and for each control_parameter')) - string = "%s\n%s" % (string, fielddisplay(self, 'cost_function_threshold', 'misfit convergence criterion. Default is 1%, NaN if not applied')) - string = "%s\n%s" % (string, fielddisplay(self, 'maxiter_per_step', 'maximum iterations during each optimization step')) - string = "%s\n%s" % (string, fielddisplay(self, 'gradient_scaling', 'scaling factor on gradient direction during optimization, for each optimization step')) - string = "%s\n%s" % (string, fielddisplay(self, 'step_threshold', 'decrease threshold for misfit, default is 30%')) - string = "%s\n%s" % (string, fielddisplay(self, 'min_parameters', 'absolute minimum acceptable value of the inversed parameter on each vertex')) - string = "%s\n%s" % (string, fielddisplay(self, 'max_parameters', 'absolute maximum acceptable value of the inversed parameter on each vertex')) - string = "%s\n%s" % (string, fielddisplay(self, 'vx_obs', 'observed velocity x component [m / yr]')) - string = "%s\n%s" % (string, fielddisplay(self, 'vy_obs', 'observed velocity y component [m / yr]')) - string = "%s\n%s" % (string, fielddisplay(self, 'vel_obs', 'observed velocity magnitude [m / yr]')) - string = "%s\n%s" % (string, fielddisplay(self, 'thickness_obs', 'observed thickness [m]')) - string = "%s\n%s" % (string, fielddisplay(self, 'surface_obs', 'observed surface elevation [m]')) - string = "%s\n%s" % (string, 'Available cost functions:') - string = "%s\n%s" % (string, ' 101: SurfaceAbsVelMisfit') - string = "%s\n%s" % (string, ' 102: SurfaceRelVelMisfit') - string = "%s\n%s" % (string, ' 103: SurfaceLogVelMisfit') - string = "%s\n%s" % (string, ' 104: SurfaceLogVxVyMisfit') - string = "%s\n%s" % (string, ' 105: SurfaceAverageVelMisfit') - string = "%s\n%s" % (string, ' 201: ThicknessAbsMisfit') - string = "%s\n%s" % (string, ' 501: DragCoefficientAbsGradient') - string = "%s\n%s" % (string, ' 502: RheologyBbarAbsGradient') - string = "%s\n%s" % (string, ' 503: ThicknessAbsGradient') - return string + s = 
' inversion parameters:\n' + s += '{}\n'.format(fielddisplay(self, 'iscontrol', 'is inversion activated?')) + s += '{}\n'.format(fielddisplay(self, 'incomplete_adjoint', '1: linear viscosity, 0: non - linear viscosity')) + s += '{}\n'.format(fielddisplay(self, 'control_parameters', 'ex: {''FrictionCoefficient''}, or {''MaterialsRheologyBbar''}')) + s += '{}\n'.format(fielddisplay(self, 'nsteps', 'number of optimization searches')) + s += '{}\n'.format(fielddisplay(self, 'cost_functions', 'indicate the type of response for each optimization step')) + s += '{}\n'.format(fielddisplay(self, 'cost_functions_coefficients', 'cost_functions_coefficients applied to the misfit of each vertex and for each control_parameter')) + s += '{}\n'.format(fielddisplay(self, 'cost_function_threshold', 'misfit convergence criterion. Default is 1%, NaN if not applied')) + s += '{}\n'.format(fielddisplay(self, 'maxiter_per_step', 'maximum iterations during each optimization step')) + s += '{}\n'.format(fielddisplay(self, 'gradient_scaling', 'scaling factor on gradient direction during optimization, for each optimization step')) + s += '{}\n'.format(fielddisplay(self, 'step_threshold', 'decrease threshold for misfit, default is 30%')) + s += '{}\n'.format(fielddisplay(self, 'min_parameters', 'absolute minimum acceptable value of the inversed parameter on each vertex')) + s += '{}\n'.format(fielddisplay(self, 'max_parameters', 'absolute maximum acceptable value of the inversed parameter on each vertex')) + s += '{}\n'.format(fielddisplay(self, 'vx_obs', 'observed velocity x component [m / yr]')) + s += '{}\n'.format(fielddisplay(self, 'vy_obs', 'observed velocity y component [m / yr]')) + s += '{}\n'.format(fielddisplay(self, 'vel_obs', 'observed velocity magnitude [m / yr]')) + s += '{}\n'.format(fielddisplay(self, 'thickness_obs', 'observed thickness [m]')) + s += '{}\n'.format(fielddisplay(self, 'surface_obs', 'observed surface elevation [m]')) + s += '{}\n'.format('Available cost functions:') + s += '{}\n'.format(' 101: SurfaceAbsVelMisfit') + s += '{}\n'.format(' 102: SurfaceRelVelMisfit') + s += '{}\n'.format(' 103: SurfaceLogVelMisfit') + s += '{}\n'.format(' 104: SurfaceLogVxVyMisfit') + s += '{}\n'.format(' 105: SurfaceAverageVelMisfit') + s += '{}\n'.format(' 201: ThicknessAbsMisfit') + s += '{}\n'.format(' 501: DragCoefficientAbsGradient') + s += '{}\n'.format(' 502: RheologyBbarAbsGradient') + s += '{}\n'.format(' 503: ThicknessAbsGradient') + return s #}}} - def extrude(self, md): # {{{ - self.vx_obs = project3d(md, 'vector', self.vx_obs, 'type', 'node') - self.vy_obs = project3d(md, 'vector', self.vy_obs, 'type', 'node') - self.vel_obs = project3d(md, 'vector', self.vel_obs, 'type', 'node') - self.thickness_obs = project3d(md, 'vector', self.thickness_obs, 'type', 'node') - if not np.any(np.isnan(self.cost_functions_coefficients)): - self.cost_functions_coefficients = project3d(md, 'vector', self.cost_functions_coefficients, 'type', 'node') - if not np.any(np.isnan(self.min_parameters)): - self.min_parameters = project3d(md, 'vector', self.min_parameters, 'type', 'node') - if not np.any(np.isnan(self.max_parameters)): - self.max_parameters = project3d(md, 'vector', self.max_parameters, 'type', 'node') - return self - #}}} - def setdefaultparameters(self): # {{{ - #default is incomplete adjoint for now self.incomplete_adjoint = 1 #parameter to be inferred by control methods (only @@ -114,13 +97,26 @@ #cost_function_threshold is a criteria to stop the control methods. 
#if J[n] - J[n - 1] / J[n] < criteria, the control run stops #NaN if not applied - self.cost_function_threshold = float('NaN') #not activated + self.cost_function_threshold = np.nan #not activated + return self + #}}} + def extrude(self, md): # {{{ + self.vx_obs = project3d(md, 'vector', self.vx_obs, 'type', 'node') + self.vy_obs = project3d(md, 'vector', self.vy_obs, 'type', 'node') + self.vel_obs = project3d(md, 'vector', self.vel_obs, 'type', 'node') + self.thickness_obs = project3d(md, 'vector', self.thickness_obs, 'type', 'node') + if not np.any(np.isnan(self.cost_functions_coefficients)): + self.cost_functions_coefficients = project3d(md, 'vector', self.cost_functions_coefficients, 'type', 'node') + if not np.any(np.isnan(self.min_parameters)): + self.min_parameters = project3d(md, 'vector', self.min_parameters, 'type', 'node') + if not np.any(np.isnan(self.max_parameters)): + self.max_parameters = project3d(md, 'vector', self.max_parameters, 'type', 'node') return self #}}} def checkconsistency(self, md, solution, analyses): # {{{ - #Early return + # Early return if not self.iscontrol: return md @@ -139,17 +135,17 @@ md = checkfield(md, 'fieldname', 'inversion.min_parameters', 'size', [md.mesh.numberofvertices, num_controls]) md = checkfield(md, 'fieldname', 'inversion.max_parameters', 'size', [md.mesh.numberofvertices, num_controls]) - #Only SSA, HO and FS are supported right now + # Only SSA, HO and FS are supported right now if solution == 'StressbalanceSolution': if not (md.flowequation.isSSA or md.flowequation.isHO or md.flowequation.isFS or md.flowequation.isL1L2): md.checkmessage("'inversion can only be performed for SSA, HO or FS ice flow models") - if solution == 'BalancethicknessSolution': md = checkfield(md, 'fieldname', 'inversion.thickness_obs', 'size', [md.mesh.numberofvertices], 'NaN', 1, 'Inf', 1) + elif solution == 'BalancethicknessSoftSolution': + md = checkfield(md, 'fieldname', 'inversion.thickness_obs', 'size', [md.mesh.numberofvertices], 'NaN', 1, 'Inf', 1) else: md = checkfield(md, 'fieldname', 'inversion.vx_obs', 'size', [md.mesh.numberofvertices], 'NaN', 1, 'Inf', 1) md = checkfield(md, 'fieldname', 'inversion.vy_obs', 'size', [md.mesh.numberofvertices], 'NaN', 1, 'Inf', 1) - return md # }}} @@ -176,12 +172,12 @@ WriteData(fid, prefix, 'object', self, 'fieldname', 'thickness_obs', 'format', 'DoubleMat', 'mattype', 1) WriteData(fid, prefix, 'object', self, 'fieldname', 'surface_obs', 'format', 'DoubleMat', 'mattype', 1) - #process control parameters + # Process control parameters num_control_parameters = len(self.control_parameters) WriteData(fid, prefix, 'object', self, 'fieldname', 'control_parameters', 'format', 'StringArray') WriteData(fid, prefix, 'data', num_control_parameters, 'name', 'md.inversion.num_control_parameters', 'format', 'Integer') - #process cost functions + # Process cost functions num_cost_functions = np.size(self.cost_functions) data = marshallcostfunctions(self.cost_functions) WriteData(fid, prefix, 'data', data, 'name', 'md.inversion.cost_functions', 'format', 'StringArray') Index: ../trunk-jpl/src/m/classes/basalforcings.py =================================================================== --- ../trunk-jpl/src/m/classes/basalforcings.py (revision 25687) +++ ../trunk-jpl/src/m/classes/basalforcings.py (revision 25688) @@ -1,56 +1,48 @@ +import numpy as np + +from checkfield import checkfield from fielddisplay import fielddisplay from project3d import project3d -from checkfield import checkfield from WriteData import WriteData -import 
numpy as np class basalforcings(object): - """ - BASAL FORCINGS class definition + """BASAL FORCINGS class definition - Usage: - basalforcings = basalforcings() + Usage: + basalforcings = basalforcings() """ def __init__(self): # {{{ - self.groundedice_melting_rate = float('NaN') - self.floatingice_melting_rate = float('NaN') - self.geothermalflux = float('NaN') + self.groundedice_melting_rate = np.nan + self.floatingice_melting_rate = np.nan + self.geothermalflux = np.nan - #set defaults self.setdefaultparameters() - #}}} def __repr__(self): # {{{ - string = " basal forcings parameters:" - - string = "%s\n%s" % (string, fielddisplay(self, "groundedice_melting_rate", "basal melting rate (positive if melting) [m / yr]")) - string = "%s\n%s" % (string, fielddisplay(self, "floatingice_melting_rate", "basal melting rate (positive if melting) [m / yr]")) - string = "%s\n%s" % (string, fielddisplay(self, "geothermalflux", "geothermal heat flux [W / m^2]")) - return string + s = ' basal forcings parameters:\n' + s += '{}\n'.format(fielddisplay(self, "groundedice_melting_rate", "basal melting rate (positive if melting) [m / yr]")) + s += '{}\n'.format(fielddisplay(self, "floatingice_melting_rate", "basal melting rate (positive if melting) [m / yr]")) + s += '{}\n'.format(fielddisplay(self, "geothermalflux", "geothermal heat flux [W / m^2]")) + return s #}}} def extrude(self, md): # {{{ self.groundedice_melting_rate = project3d(md, 'vector', self.groundedice_melting_rate, 'type', 'node', 'layer', 1) self.floatingice_melting_rate = project3d(md, 'vector', self.floatingice_melting_rate, 'type', 'node', 'layer', 1) - self.geothermalflux = project3d(md, 'vector', self.geothermalflux, 'type', 'node', 'layer', 1) #bedrock only gets geothermal flux + self.geothermalflux = project3d(md, 'vector', self.geothermalflux, 'type', 'node', 'layer', 1) # Bedrock only gets geothermal flux return self #}}} def initialize(self, md): # {{{ if np.all(np.isnan(self.groundedice_melting_rate)): - self.groundedice_melting_rate = np.zeros((md.mesh.numberofvertices)) + self.groundedice_melting_rate = np.zeros((md.mesh.numberofvertices, )) print(" no basalforcings.groundedice_melting_rate specified: values set as zero") - if np.all(np.isnan(self.floatingice_melting_rate)): - self.floatingice_melting_rate = np.zeros((md.mesh.numberofvertices)) + self.floatingice_melting_rate = np.zeros((md.mesh.numberofvertices, )) print(" no basalforcings.floatingice_melting_rate specified: values set as zero") - #if np.all(np.isnan(self.geothermalflux)): - #self.geothermalflux = np.zeros((md.mesh.numberofvertices)) - #print " no basalforcings.geothermalflux specified: values set as zero" - return self #}}} @@ -59,27 +51,21 @@ #}}} def checkconsistency(self, md, solution, analyses): # {{{ - - if 'MasstransportAnalysis' in analyses and not (solution == 'TransientSolution' and not md.transient.ismasstransport): + if 'MasstransportAnalysis' in analyses and not solution == 'TransientSolution' and not md.transient.ismasstransport: md = checkfield(md, 'fieldname', 'basalforcings.groundedice_melting_rate', 'NaN', 1, 'Inf', 1, 'timeseries', 1) md = checkfield(md, 'fieldname', 'basalforcings.floatingice_melting_rate', 'NaN', 1, 'Inf', 1, 'timeseries', 1) - if 'BalancethicknessAnalysis' in analyses: md = checkfield(md, 'fieldname', 'basalforcings.groundedice_melting_rate', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices]) md = checkfield(md, 'fieldname', 'basalforcings.floatingice_melting_rate', 'NaN', 1, 'Inf', 1, 'size', 
[md.mesh.numberofvertices]) - - if 'ThermalAnalysis' in analyses and not (solution == 'TransientSolution' and not md.transient.isthermal): + if 'ThermalAnalysis' in analyses and not solution == 'TransientSolution' and not md.transient.isthermal: md = checkfield(md, 'fieldname', 'basalforcings.groundedice_melting_rate', 'NaN', 1, 'Inf', 1, 'timeseries', 1) md = checkfield(md, 'fieldname', 'basalforcings.floatingice_melting_rate', 'NaN', 1, 'Inf', 1, 'timeseries', 1) md = checkfield(md, 'fieldname', 'basalforcings.geothermalflux', 'NaN', 1, 'Inf', 1, 'timeseries', 1, '>=', 0) - return md # }}} def marshall(self, prefix, md, fid): # {{{ - yts = md.constants.yts - WriteData(fid, prefix, 'name', 'md.basalforcings.model', 'data', 1, 'format', 'Integer') WriteData(fid, prefix, 'object', self, 'fieldname', 'groundedice_melting_rate', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1. / yts, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', md.constants.yts) WriteData(fid, prefix, 'object', self, 'fieldname', 'floatingice_melting_rate', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1. / yts, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', md.constants.yts) Index: ../trunk-jpl/src/m/classes/thermal.py =================================================================== --- ../trunk-jpl/src/m/classes/thermal.py (revision 25687) +++ ../trunk-jpl/src/m/classes/thermal.py (revision 25688) @@ -1,13 +1,13 @@ import numpy as np + +from checkfield import checkfield +from fielddisplay import fielddisplay from project3d import project3d -from fielddisplay import fielddisplay -from checkfield import checkfield from WriteData import WriteData class thermal(object): - """ - THERMAL class definition + """THERMAL class definition Usage: thermal = thermal() @@ -27,26 +27,23 @@ self.watercolumn_upperlimit = 0 self.fe = 'P1' self.requested_outputs = [] - - #set defaults self.setdefaultparameters() - #}}} def __repr__(self): # {{{ - string = ' Thermal solution parameters:' - string = "%s\n%s" % (string, fielddisplay(self, 'spctemperature', 'temperature constraints (NaN means no constraint) [K]')) - string = "%s\n%s" % (string, fielddisplay(self, 'stabilization', '0: no, 1: artificial_diffusivity, 2: SUPG')) - string = "%s\n%s" % (string, fielddisplay(self, 'maxiter', 'maximum number of non linear iterations')) - string = "%s\n%s" % (string, fielddisplay(self, 'reltol', 'relative tolerance criterion')) - string = "%s\n%s" % (string, fielddisplay(self, 'penalty_lock', 'stabilize unstable thermal constraints that keep zigzagging after n iteration (default is 0, no stabilization)')) - string = "%s\n%s" % (string, fielddisplay(self, 'penalty_threshold', 'threshold to declare convergence of thermal solution (default is 0)')) - string = "%s\n%s" % (string, fielddisplay(self, 'isenthalpy', 'use an enthalpy formulation to include temperate ice (default is 0)')) - string = "%s\n%s" % (string, fielddisplay(self, 'isdynamicbasalspc', 'enable dynamic setting of basal forcing. 
required for enthalpy formulation (default is 0)')) - string = "%s\n%s" % (string, fielddisplay(self, 'isdrainicecolumn', 'wether waterfraction drainage is enabled for enthalpy formulation (default is 1)')) - string = "%s\n%s" % (string, fielddisplay(self, 'watercolumn_upperlimit', 'upper limit of basal watercolumn for enthalpy formulation (default is 1000m)')) - string = "%s\n%s" % (string, fielddisplay(self, 'requested_outputs', 'additional outputs requested')) - return string + s = ' Thermal solution parameters:\n' + s += '{}\n'.format(fielddisplay(self, 'spctemperature', 'temperature constraints (NaN means no constraint) [K]')) + s += '{}\n'.format(fielddisplay(self, 'stabilization', '0: no, 1: artificial_diffusivity, 2: SUPG')) + s += '{}\n'.format(fielddisplay(self, 'maxiter', 'maximum number of non linear iterations')) + s += '{}\n'.format(fielddisplay(self, 'reltol', 'relative tolerance criterion')) + s += '{}\n'.format(fielddisplay(self, 'penalty_lock', 'stabilize unstable thermal constraints that keep zigzagging after n iteration (default is 0, no stabilization)')) + s += '{}\n'.format(fielddisplay(self, 'penalty_threshold', 'threshold to declare convergence of thermal solution (default is 0)')) + s += '{}\n'.format(fielddisplay(self, 'isenthalpy', 'use an enthalpy formulation to include temperate ice (default is 0)')) + s += '{}\n'.format(fielddisplay(self, 'isdynamicbasalspc', 'enable dynamic setting of basal forcing. required for enthalpy formulation (default is 0)')) + s += '{}\n'.format(fielddisplay(self, 'isdrainicecolumn', 'wether waterfraction drainage is enabled for enthalpy formulation (default is 1)')) + s += '{}\n'.format(fielddisplay(self, 'watercolumn_upperlimit', 'upper limit of basal watercolumn for enthalpy formulation (default is 1000m)')) + s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested')) + return s #}}} def extrude(self, md): # {{{ @@ -63,7 +60,6 @@ return ['Enthalpy', 'Temperature', 'Waterfraction', 'Watercolumn', 'BasalforcingsGroundediceMeltingRate'] else: return ['Temperature', 'BasalforcingsGroundediceMeltingRate'] - #}}} def setdefaultparameters(self): # {{{ @@ -90,18 +86,15 @@ #default output self.requested_outputs = ['default'] return self - #}}} def checkconsistency(self, md, solution, analyses): # {{{ - #Early return + # Early return if ('ThermalAnalysis' not in analyses and 'EnthalpyAnalysis' not in analyses) or (solution == 'TransientSolution' and not md.transient.isthermal): return md - md = checkfield(md, 'fieldname', 'thermal.stabilization', 'numel', [1], 'values', [0, 1, 2]) md = checkfield(md, 'fieldname', 'thermal.spctemperature', 'Inf', 1, 'timeseries', 1) md = checkfield(md, 'fieldname', 'thermal.requested_outputs', 'stringrow', 1) - if 'EnthalpyAnalysis' in analyses and md.thermal.isenthalpy and md.mesh.dimension() == 3: md = checkfield(md, 'fieldname', 'thermal.isdrainicecolumn', 'numel', [1], 'values', [0, 1]) md = checkfield(md, 'fieldname', 'thermal.watercolumn_upperlimit', '>=', 0) @@ -122,7 +115,6 @@ if np.isnan(md.stressbalance.reltol): md.checkmessage("for a steadystate computation, thermal.reltol (relative convergence criterion) must be defined!") md = checkfield(md, 'fieldname', 'thermal.reltol', '>', 0., 'message', "reltol must be larger than zero") - return md # }}} Index: ../trunk-jpl/src/m/classes/frictionjosh.py =================================================================== --- ../trunk-jpl/src/m/classes/frictionjosh.py (revision 25687) +++ 
../trunk-jpl/src/m/classes/frictionjosh.py (revision 25688) @@ -1,3 +1,5 @@ +import numpy as np + from checkfield import checkfield from fielddisplay import fielddisplay from project3d import project3d @@ -5,32 +7,31 @@ class frictionjosh(object): - ''' - FRICTION class definition + """FRICTIONJOSH class definition - Usage: - frictionjosh = frictionjosh() - ''' + Usage: + frictionjosh = frictionjosh() + """ def __init__(self): # {{{ - self.coefficient = float('NaN') - self.pressure_adjusted_temperature = float('NaN') + self.coefficient = np.nan + self.pressure_adjusted_temperature = np.nan self.gamma = 0 self.effective_pressure_limit = 0 - #set defaults + self.setdefaultparameters() #self.requested_outputs = [] #}}} def __repr__(self): # {{{ - string = "Basal shear stress parameters: Sigma_b = coefficient^2 * Neff ^r * |u_b|^(s - 1) * u_b, \n(effective stress Neff = rho_ice * g * thickness + rho_water * g * base, r = q / p and s = 1 / p)" - - string = "%s\n%s" % (string, fielddisplay(self, "coefficient", "friction coefficient [SI]")) - string = "%s\n%s" % (string, fielddisplay(self, 'pressure_adjusted_temperature', 'friction pressure_adjusted_temperature (T - Tpmp) [K]')) - string = "%s\n%s" % (string, fielddisplay(self, 'gamma', '(T - Tpmp)/gamma [K]')) - string = "%s\n%s" % (string, fielddisplay(self, 'effective_pressure_limit', 'Neff do not allow to fall below a certain limit: effective_pressure_limit * rho_ice * g * thickness (default 0)')) - #string = "%s\n%s" % (string, fielddisplay(self, 'requested_outputs', 'additional outputs requested')) - return string + s = 'Basal shear stress parameters: Sigma_b = coefficient^2 * Neff ^r * |u_b|^(s - 1) * u_b,\n' + s += '(effective stress Neff = rho_ice * g * thickness + rho_water * g * base, r = q / p and s = 1 / p)\n' + s += '{}\n'.format(fielddisplay(self, "coefficient", "friction coefficient [SI]")) + s += '{}\n'.format(fielddisplay(self, 'pressure_adjusted_temperature', 'friction pressure_adjusted_temperature (T - Tpmp) [K]')) + s += '{}\n'.format(fielddisplay(self, 'gamma', '(T - Tpmp)/gamma [K]')) + s += '{}\n'.format(fielddisplay(self, 'effective_pressure_limit', 'Neff do not allow to fall below a certain limit: effective_pressure_limit * rho_ice * g * thickness (default 0)')) + #s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested')) + return s #}}} def extrude(self, md): # {{{ @@ -52,16 +53,14 @@ #}}} def checkconsistency(self, md, solution, analyses): # {{{ - #Early return + # Early return if 'StressbalanceAnalysis' not in analyses and 'ThermalAnalysis' not in analyses: return md - md = checkfield(md, 'fieldname', 'friction.coefficient', 'timeseries', 1, 'NaN', 1, 'Inf', 1) md = checkfield(md, 'fieldname', 'friction.pressure_adjusted_temperature','NaN',1,'Inf',1) md = checkfield(md, 'fieldname', 'friction.gamma','numel',1,'NaN',1,'Inf',1,'>',0.) 
md = checkfield(md, 'fieldname', 'friction.effective_pressure_limit', 'numel', [1], '>=', 0) - - #Check that temperature is provided + # Check that temperature is provided md = checkfield(md,'fieldname','initialization.temperature','NaN',1,'Inf',1,'size','universal') return md # }}} @@ -72,5 +71,4 @@ WriteData(fid,prefix,'class','friction','object',self,'fieldname','pressure_adjusted_temperature','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts) WriteData(fid,prefix,'class','friction','object',self,'fieldname','gamma','format','Double') WriteData(fid,prefix,'object',self,'class','friction','fieldname','effective_pressure_limit','format','Double') - # }}} Index: ../trunk-jpl/src/m/classes/transient.py =================================================================== --- ../trunk-jpl/src/m/classes/transient.py (revision 25687) +++ ../trunk-jpl/src/m/classes/transient.py (revision 25688) @@ -1,83 +1,79 @@ +from checkfield import checkfield from fielddisplay import fielddisplay -from checkfield import checkfield from WriteData import WriteData class transient(object): - """ - TRANSIENT class definition + """TRANSIENT class definition - Usage: - transient = transient() + Usage: + transient = transient() """ def __init__(self): # {{{ - self.issmb = False - self.ismasstransport = False - self.isstressbalance = False - self.isthermal = False - self.isgroundingline = False - self.isgia = False - self.isesa = False - self.isdamageevolution = False - self.ismovingfront = False - self.ishydrology = False - self.isslr = False - self.iscoupler = False + self.issmb = 0 + self.ismasstransport = 0 + self.isstressbalance = 0 + self.isthermal = 0 + self.isgroundingline = 0 + self.isgia = 0 + self.isesa = 0 + self.isdamageevolution = 0 + self.ismovingfront = 0 + self.ishydrology = 0 + self.isslr = 0 + self.iscoupler = 0 self.amr_frequency = 0 - self.isoceancoupling = False + self.isoceancoupling = 0 self.requested_outputs = [] - #set defaults self.setdefaultparameters() + #}}} - #}}} def __repr__(self): # {{{ - string = ' transient solution parameters:' - string = "%s\n%s" % (string, fielddisplay(self, 'issmb', 'indicates if a surface mass balance solution is used in the transient')) - string = "%s\n%s" % (string, fielddisplay(self, 'ismasstransport', 'indicates if a masstransport solution is used in the transient')) - string = "%s\n%s" % (string, fielddisplay(self, 'isstressbalance', 'indicates if a stressbalance solution is used in the transient')) - string = "%s\n%s" % (string, fielddisplay(self, 'isthermal', 'indicates if a thermal solution is used in the transient')) - string = "%s\n%s" % (string, fielddisplay(self, 'isgroundingline', 'indicates if a groundingline migration is used in the transient')) - string = "%s\n%s" % (string, fielddisplay(self, 'isgia', 'indicates if a postglacial rebound is used in the transient')) - string = "%s\n%s" % (string, fielddisplay(self, 'isesa', 'indicates whether an elastic adjustment model is used in the transient')) - string = "%s\n%s" % (string, fielddisplay(self, 'isdamageevolution', 'indicates whether damage evolution is used in the transient')) - string = "%s\n%s" % (string, fielddisplay(self, 'ismovingfront', 'indicates whether a moving front capability is used in the transient')) - string = "%s\n%s" % (string, fielddisplay(self, 'ishydrology', 'indicates whether an hydrology model is used')) - string = "%s\n%s" % (string, fielddisplay(self, 'isslr', 'indicates if a sea level rise solution is used in the 
transient')) - string = "%s\n%s" % (string, fielddisplay(self, 'isoceancoupling', 'indicates whether coupling with an ocean model is used in the transient')) - string = "%s\n%s" % (string, fielddisplay(self, 'iscoupler', 'indicates whether different models are being run with need for coupling')) - string = "%s\n%s" % (string, fielddisplay(self, 'amr_frequency', 'frequency at which mesh is refined in simulations with multiple time_steps')) - string = "%s\n%s" % (string, fielddisplay(self, 'requested_outputs', 'list of additional outputs requested')) - return string + s = ' transient solution parameters:\n' + s += '{}\n'.format(fielddisplay(self, 'issmb', 'indicates if a surface mass balance solution is used in the transient')) + s += '{}\n'.format(fielddisplay(self, 'ismasstransport', 'indicates if a masstransport solution is used in the transient')) + s += '{}\n'.format(fielddisplay(self, 'isstressbalance', 'indicates if a stressbalance solution is used in the transient')) + s += '{}\n'.format(fielddisplay(self, 'isthermal', 'indicates if a thermal solution is used in the transient')) + s += '{}\n'.format(fielddisplay(self, 'isgroundingline', 'indicates if a groundingline migration is used in the transient')) + s += '{}\n'.format(fielddisplay(self, 'isgia', 'indicates if a postglacial rebound is used in the transient')) + s += '{}\n'.format(fielddisplay(self, 'isesa', 'indicates whether an elastic adjustment model is used in the transient')) + s += '{}\n'.format(fielddisplay(self, 'isdamageevolution', 'indicates whether damage evolution is used in the transient')) + s += '{}\n'.format(fielddisplay(self, 'ismovingfront', 'indicates whether a moving front capability is used in the transient')) + s += '{}\n'.format(fielddisplay(self, 'ishydrology', 'indicates whether an hydrology model is used')) + s += '{}\n'.format(fielddisplay(self, 'isslr', 'indicates if a sea level rise solution is used in the transient')) + s += '{}\n'.format(fielddisplay(self, 'isoceancoupling', 'indicates whether coupling with an ocean model is used in the transient')) + s += '{}\n'.format(fielddisplay(self, 'iscoupler', 'indicates whether different models are being run with need for coupling')) + s += '{}\n'.format(fielddisplay(self, 'amr_frequency', 'frequency at which mesh is refined in simulations with multiple time_steps')) + s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'list of additional outputs requested')) + return s #}}} def defaultoutputs(self, md): # {{{ - if self.issmb: return ['SmbMassBalance'] else: return [] - #}}} def deactivateall(self): #{{{ - self.issmb = False - self.ismasstransport = False - self.isstressbalance = False - self.isthermal = False - self.isgroundingline = False - self.isgia = False - self.isesa = False - self.isdamageevolution = False - self.ismovingfront = False - self.ishydrology = False - self.isslr = False - self.isoceancoupling = False - self.iscoupler = False + self.issmb = 0 + self.ismasstransport = 0 + self.isstressbalance = 0 + self.isthermal = 0 + self.isgroundingline = 0 + self.isgia = 0 + self.isesa = 0 + self.isdamageevolution = 0 + self.ismovingfront = 0 + self.ishydrology = 0 + self.isslr = 0 + self.isoceancoupling = 0 + self.iscoupler = 0 self.amr_frequency = 0 - #default output + # Default output self.requested_outputs = [] return self #}}} @@ -84,22 +80,22 @@ def setdefaultparameters(self): # {{{ #full analysis: Stressbalance, Masstransport and Thermal but no groundingline migration for now - self.issmb = True - self.ismasstransport = True - 
self.isstressbalance = True - self.isthermal = True - self.isgroundingline = False - self.isgia = False - self.isesa = False - self.isdamageevolution = False - self.ismovingfront = False - self.ishydrology = False - self.isslr = False - self.isoceancoupling = False - self.iscoupler = False + self.issmb = 1 + self.ismasstransport = 1 + self.isstressbalance = 1 + self.isthermal = 1 + self.isgroundingline = 0 + self.isgia = 0 + self.isesa = 0 + self.isdamageevolution = 0 + self.ismovingfront = 0 + self.ishydrology = 0 + self.isslr = 0 + self.isoceancoupling = 0 + self.iscoupler = 0 self.amr_frequency = 0 - #default output + # Default output self.requested_outputs = ['default'] return self #}}} @@ -123,13 +119,11 @@ md = checkfield(md, 'fieldname', 'transient.isoceancoupling', 'numel', [1], 'values', [0, 1]) md = checkfield(md, 'fieldname', 'transient.iscoupler', 'numel', [1], 'values', [0, 1]) md = checkfield(md, 'fieldname', 'transient.amr_frequency', 'numel', [1], '>=', 0, 'NaN', 1, 'Inf', 1) - md = checkfield(md, 'fieldname', 'transient.requested_outputs', 'stringrow', 1) - if (solution != 'TransientSolution') and (md.transient.iscoupling): + if solution != 'TransientSolution' and md.transient.iscoupling: md.checkmessage("Coupling with ocean can only be done in transient simulations!") - if (md.transient.isdamageevolution and not hasattr(md.materials, 'matdamageice')): + if md.transient.isdamageevolution and not hasattr(md.materials, 'matdamageice'): md.checkmessage("requesting damage evolution but md.materials is not of class matdamageice") - return md # }}} @@ -149,7 +143,7 @@ WriteData(fid, prefix, 'object', self, 'fieldname', 'iscoupler', 'format', 'Boolean') WriteData(fid, prefix, 'object', self, 'fieldname', 'amr_frequency', 'format', 'Integer') - #process requested outputs + # Process requested outputs outputs = self.requested_outputs indices = [i for i, x in enumerate(outputs) if x == 'default'] if len(indices) > 0: Index: ../trunk-jpl/src/m/classes/taoinversion.py =================================================================== --- ../trunk-jpl/src/m/classes/taoinversion.py (revision 25687) +++ ../trunk-jpl/src/m/classes/taoinversion.py (revision 25688) @@ -1,18 +1,25 @@ import numpy as np -from project3d import project3d -from WriteData import WriteData + from checkfield import checkfield from IssmConfig import IssmConfig from marshallcostfunctions import marshallcostfunctions +from project3d import project3d from supportedcontrols import * from supportedcostfunctions import * +from WriteData import WriteData -class taoinversion(object): +class taoinversion(object): #{{{ + """TAOINVERSION class definition + + Usage: + inversion = taoinversion() + """ + def __init__(self): self.iscontrol = 0 self.incomplete_adjoint = 0 - self.control_parameters = float('NaN') + self.control_parameters = np.nan self.maxsteps = 0 self.maxiter = 0 self.fatol = 0 @@ -21,54 +28,56 @@ self.grtol = 0 self.gttol = 0 self.algorithm = '' - self.cost_functions = float('NaN') - self.cost_functions_coefficients = float('NaN') - self.min_parameters = float('NaN') - self.max_parameters = float('NaN') - self.vx_obs = float('NaN') - self.vy_obs = float('NaN') - self.vz_obs = float('NaN') - self.vel_obs = float('NaN') - self.thickness_obs = float('NaN') - self.surface_obs = float('NaN') + self.cost_functions = np.nan + self.cost_functions_coefficients = np.nan + self.min_parameters = np.nan + self.max_parameters = np.nan + self.vx_obs = np.nan + self.vy_obs = np.nan + self.vz_obs = np.nan + self.vel_obs = np.nan 
+ self.thickness_obs = np.nan + self.surface_obs = np.nan + self.setdefaultparameters() + #}}} def __repr__(self): - string = ' taoinversion parameters:' - string = "%s\n%s" % (string, fieldstring(self, 'iscontrol', 'is inversion activated?')) - string = "%s\n%s" % (string, fieldstring(self, 'mantle_viscosity', 'mantle viscosity constraints (NaN means no constraint) (Pa s)')) - string = "%s\n%s" % (string, fieldstring(self, 'lithosphere_thickness', 'lithosphere thickness constraints (NaN means no constraint) (m)')) - string = "%s\n%s" % (string, fieldstring(self, 'cross_section_shape', "1: square-edged, 2: elliptical - edged surface")) - string = "%s\n%s" % (string, fieldstring(self, 'incomplete_adjoint', '1: linear viscosity, 0: non - linear viscosity')) - string = "%s\n%s" % (string, fieldstring(self, 'control_parameters', 'ex: {''FrictionCoefficient''}, or {''MaterialsRheologyBbar''}')) - string = "%s\n%s" % (string, fieldstring(self, 'maxsteps', 'maximum number of iterations (gradient computation)')) - string = "%s\n%s" % (string, fieldstring(self, 'maxiter', 'maximum number of Function evaluation (forward run)')) - string = "%s\n%s" % (string, fieldstring(self, 'fatol', 'convergence criterion: f(X) - f(X * ) (X: current iteration, X * : "true" solution, f: cost function)')) - string = "%s\n%s" % (string, fieldstring(self, 'frtol', 'convergence criterion: |f(X) - f(X * )| / |f(X * )|')) - string = "%s\n%s" % (string, fieldstring(self, 'gatol', 'convergence criterion: ||g(X)|| (g: gradient of the cost function)')) - string = "%s\n%s" % (string, fieldstring(self, 'grtol', 'convergence criterion: ||g(X)|| / |f(X)|')) - string = "%s\n%s" % (string, fieldstring(self, 'gttol', 'convergence criterion: ||g(X)|| / ||g(X0)|| (g(X0): gradient at initial guess X0)')) - string = "%s\n%s" % (string, fieldstring(self, 'algorithm', 'minimization algorithm: ''tao_blmvm'', ''tao_cg'', ''tao_lmvm''')) - string = "%s\n%s" % (string, fieldstring(self, 'cost_functions', 'indicate the type of response for each optimization step')) - string = "%s\n%s" % (string, fieldstring(self, 'cost_functions_coefficients', 'cost_functions_coefficients applied to the misfit of each vertex and for each control_parameter')) - string = "%s\n%s" % (string, fieldstring(self, 'min_parameters', 'absolute minimum acceptable value of the inversed parameter on each vertex')) - string = "%s\n%s" % (string, fieldstring(self, 'max_parameters', 'absolute maximum acceptable value of the inversed parameter on each vertex')) - string = "%s\n%s" % (string, fieldstring(self, 'vx_obs', 'observed velocity x component [m / yr]')) - string = "%s\n%s" % (string, fieldstring(self, 'vy_obs', 'observed velocity y component [m / yr]')) - string = "%s\n%s" % (string, fieldstring(self, 'vel_obs', 'observed velocity magnitude [m / yr]')) - string = "%s\n%s" % (string, fieldstring(self, 'thickness_obs', 'observed thickness [m]')) - string = "%s\n%s" % (string, fieldstring(self, 'surface_obs', 'observed surface elevation [m]')) - string = "%s\n%s" % (string, 'Available cost functions:') - string = "%s\n%s" % (string, ' 101: SurfaceAbsVelMisfit') - string = "%s\n%s" % (string, ' 102: SurfaceRelVelMisfit') - string = "%s\n%s" % (string, ' 103: SurfaceLogVelMisfit') - string = "%s\n%s" % (string, ' 104: SurfaceLogVxVyMisfit') - string = "%s\n%s" % (string, ' 105: SurfaceAverageVelMisfit') - string = "%s\n%s" % (string, ' 201: ThicknessAbsMisfit') - string = "%s\n%s" % (string, ' 501: DragCoefficientAbsGradient') - string = "%s\n%s" % (string, ' 502: 
RheologyBbarAbsGradient') - string = "%s\n%s" % (string, ' 503: ThicknessAbsGradient') - return string + s = ' taoinversion parameters:\n' + s += '{}'.format(fieldstring(self, 'iscontrol', 'is inversion activated?')) + s += '{}'.format(fieldstring(self, 'mantle_viscosity', 'mantle viscosity constraints (NaN means no constraint) (Pa s)')) + s += '{}'.format(fieldstring(self, 'lithosphere_thickness', 'lithosphere thickness constraints (NaN means no constraint) (m)')) + s += '{}'.format(fieldstring(self, 'cross_section_shape', "1: square-edged, 2: elliptical - edged surface")) + s += '{}'.format(fieldstring(self, 'incomplete_adjoint', '1: linear viscosity, 0: non - linear viscosity')) + s += '{}'.format(fieldstring(self, 'control_parameters', 'ex: {''FrictionCoefficient''}, or {''MaterialsRheologyBbar''}')) + s += '{}'.format(fieldstring(self, 'maxsteps', 'maximum number of iterations (gradient computation)')) + s += '{}'.format(fieldstring(self, 'maxiter', 'maximum number of Function evaluation (forward run)')) + s += '{}'.format(fieldstring(self, 'fatol', 'convergence criterion: f(X) - f(X * ) (X: current iteration, X * : "true" solution, f: cost function)')) + s += '{}'.format(fieldstring(self, 'frtol', 'convergence criterion: |f(X) - f(X * )| / |f(X * )|')) + s += '{}'.format(fieldstring(self, 'gatol', 'convergence criterion: ||g(X)|| (g: gradient of the cost function)')) + s += '{}'.format(fieldstring(self, 'grtol', 'convergence criterion: ||g(X)|| / |f(X)|')) + s += '{}'.format(fieldstring(self, 'gttol', 'convergence criterion: ||g(X)|| / ||g(X0)|| (g(X0): gradient at initial guess X0)')) + s += '{}'.format(fieldstring(self, 'algorithm', 'minimization algorithm: ''tao_blmvm'', ''tao_cg'', ''tao_lmvm''')) + s += '{}'.format(fieldstring(self, 'cost_functions', 'indicate the type of response for each optimization step')) + s += '{}'.format(fieldstring(self, 'cost_functions_coefficients', 'cost_functions_coefficients applied to the misfit of each vertex and for each control_parameter')) + s += '{}'.format(fieldstring(self, 'min_parameters', 'absolute minimum acceptable value of the inversed parameter on each vertex')) + s += '{}'.format(fieldstring(self, 'max_parameters', 'absolute maximum acceptable value of the inversed parameter on each vertex')) + s += '{}'.format(fieldstring(self, 'vx_obs', 'observed velocity x component [m / yr]')) + s += '{}'.format(fieldstring(self, 'vy_obs', 'observed velocity y component [m / yr]')) + s += '{}'.format(fieldstring(self, 'vel_obs', 'observed velocity magnitude [m / yr]')) + s += '{}'.format(fieldstring(self, 'thickness_obs', 'observed thickness [m]')) + s += '{}'.format(fieldstring(self, 'surface_obs', 'observed surface elevation [m]')) + s += '{}'.format('Available cost functions:') + s += '{}'.format(' 101: SurfaceAbsVelMisfit') + s += '{}'.format(' 102: SurfaceRelVelMisfit') + s += '{}'.format(' 103: SurfaceLogVelMisfit') + s += '{}'.format(' 104: SurfaceLogVxVyMisfit') + s += '{}'.format(' 105: SurfaceAverageVelMisfit') + s += '{}'.format(' 201: ThicknessAbsMisfit') + s += '{}'.format(' 501: DragCoefficientAbsGradient') + s += '{}'.format(' 502: RheologyBbarAbsGradient') + s += '{}'.format(' 503: ThicknessAbsGradient') + return s def setdefaultparameters(self): #default is incomplete adjoint for now Index: ../trunk-jpl/src/m/classes/SMBcomponents.py =================================================================== --- ../trunk-jpl/src/m/classes/SMBcomponents.py (revision 25687) +++ ../trunk-jpl/src/m/classes/SMBcomponents.py (revision 25688) 
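The __repr__ rewrite above follows the same pattern applied throughout this changeset (transient, SMBcomponents, mismipbasalforcings): repeated "%s\n%s" % (string, fielddisplay(...)) concatenation becomes incremental s += '{}\n'.format(fielddisplay(...)). A minimal, self-contained sketch of the pattern; the fielddisplay stand-in and the transient_like class are illustrative only, not the ISSM implementations:

def fielddisplay(obj, name, comment):
    # Stand-in for ISSM's fielddisplay helper: one line of "name : value -- comment"
    return '         {:<23}: {:<15} -- {}'.format(name, str(getattr(obj, name)), comment)

class transient_like(object):
    """Illustrative class using the incremental-string __repr__ pattern."""

    def __init__(self):
        self.issmb = 1
        self.ismasstransport = 1

    def __repr__(self):
        s = '   transient solution parameters:\n'
        s += '{}\n'.format(fielddisplay(self, 'issmb', 'indicates if a surface mass balance solution is used in the transient'))
        s += '{}\n'.format(fielddisplay(self, 'ismasstransport', 'indicates if a masstransport solution is used in the transient'))
        return s

print(transient_like())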
@@ -1,40 +1,47 @@ +import numpy as np + +from checkfield import * from fielddisplay import fielddisplay -from checkfield import * from project3d import * from WriteData import * class SMBcomponents(object): - """ - SMBcomponents Class definition + """SMBCOMPONENTS class definition - Usage: - SMBcomponents = SMBcomponents() + Usage: + SMBcomponents = SMBcomponents() """ - def __init__(self): # {{{ - self.accumulation = float('NaN') - self.runoff = float('NaN') - self.evaporation = float('NaN') + def __init__(self, *args): # {{{ self.isclimatology = 0 + self.accumulation = np.nan + self.runoff = np.nan + self.evaporation = np.nan self.steps_per_step = 1 self.averaging = 0 self.requested_outputs = [] + + nargs = len(args) + if nargs == 0: + pass + else: + raise Exception('constructor not supported') # }}} def __repr__(self): # {{{ - string = " surface forcings parameters (SMB = accumulation-runoff-evaporation) :" - string = "%s\n%s" % (string, fielddisplay(self, 'accumulation', 'accumulated snow [m/yr ice eq]')) - string = "%s\n%s" % (string, fielddisplay(self, 'runoff', 'amount of ice melt lost from the ice column [m/yr ice eq]')) - string = "%s\n%s" % (string, fielddisplay(self, 'evaporation', 'mount of ice lost to evaporative processes [m/yr ice eq]')) - string = "%s\n%s" % (string, fielddisplay(self, 'isclimatology', 'repeat all forcings when past last forcing time (default false)')) - string = "%s\n%s" % (string, fielddisplay(self, 'steps_per_step', 'number of smb steps per time step')) - string = "%s\n%s" % (string, fielddisplay(self, 'averaging', 'averaging methods from short to long steps')) - string = "%s\n\t\t%s" % (string, '0: Arithmetic (default)') - string = "%s\n\t\t%s" % (string, '1: Geometric') - string = "%s\n\t\t%s" % (string, '2: Harmonic') - string = "%s\n%s" % (string, fielddisplay(self, 'requested_outputs', 'additional outputs requested')) - return string + s = ' surface forcings parameters (SMB = accumulation-runoff-evaporation):\n' + s += '{}\n'.format(fielddisplay(self, 'accumulation', 'accumulated snow [m/yr ice eq]')) + s += '{}\n'.format(fielddisplay(self, 'runoff', 'amount of ice melt lost from the ice column [m/yr ice eq]')) + s += '{}\n'.format(fielddisplay(self, 'evaporation', 'mount of ice lost to evaporative processes [m/yr ice eq]')) + s += '{}\n'.format(fielddisplay(self, 'isclimatology', 'repeat all forcings when past last forcing time (default false)')) + s += '{}\n'.format(fielddisplay(self, 'steps_per_step', 'number of smb steps per time step')) + s += '{}\n'.format(fielddisplay(self, 'averaging', 'averaging methods from short to long steps')) + s += '\t\t{}\n'.format('0: Arithmetic (default)') + s += '\t\t{}\n'.format('1: Geometric') + s += '\t\t{}\n'.format('2: Harmonic') + s += '{}\n'.format(fielddisplay(self, 'requested_outputs', 'additional outputs requested')) + return s # }}} def extrude(self, md): # {{{ @@ -52,7 +59,6 @@ if np.all(np.isnan(self.accumulation)): self.accumulation = np.zeros((md.mesh.numberofvertices)) print(" no SMB.accumulation specified: values set as zero") - if np.all(np.isnan(self.runoff)): self.runoff = np.zeros((md.mesh.numberofvertices)) print(" no SMB.runoff specified: values set as zero") @@ -60,7 +66,6 @@ if np.all(np.isnan(self.evaporation)): self.evaporation = np.zeros((md.mesh.numberofvertices)) print(" no SMB.evaporation specified: values set as zero") - return self # }}} @@ -73,12 +78,15 @@ md = checkfield(md, 'fieldname', 'smb.accumulation', 'size', [md.mesh.numberofvertices], 'NaN', 1, 'Inf', 1) md = 
checkfield(md, 'fieldname', 'smb.runoff', 'size', [md.mesh.numberofvertices], 'NaN', 1, 'Inf', 1) md = checkfield(md, 'fieldname', 'smb.evaporation', 'size', [md.mesh.numberofvertices], 'NaN', 1, 'Inf', 1) - md = checkfield(md, 'fieldname', 'smb.steps_per_step', '>=', 1, 'numel', [1]) md = checkfield(md, 'fieldname', 'smb.averaging', 'numel', [1], 'values', [0, 1, 2]) md = checkfield(md, 'fieldname', 'masstransport.requested_outputs', 'stringrow', 1) md = checkfield(md, 'fieldname', 'smb.isclimatology', 'values', [0, 1]) - + if self.isclimatology: + md = checkfield(md, 'fieldname', 'smb.accumulation', 'size', [md.mesh.numberofvertices + 1], + 'message', 'accumulation must have md.mesh.numberofvertices+1 rows in order to force a climatology') + md = checkfield(md, 'fieldname', 'smb.runoff', 'size', [md.mesh.numberofvertices + 1], 'message', 'runoff must have md.mesh.numberofvertices+1 rows in order to force a climatology') + md = checkfield(md, 'fieldname', 'smb.evaporation', 'size', [md.mesh.numberofvertices + 1], 'message', 'evaporation must have md.mesh.numberofvertices+1 rows in order to force a climatology') return md # }}} Index: ../trunk-jpl/src/m/classes/mismipbasalforcings.py =================================================================== --- ../trunk-jpl/src/m/classes/mismipbasalforcings.py (revision 25687) +++ ../trunk-jpl/src/m/classes/mismipbasalforcings.py (revision 25688) @@ -1,36 +1,35 @@ +import numpy as np + +from checkfield import checkfield from fielddisplay import fielddisplay from project3d import project3d -from checkfield import checkfield from WriteData import WriteData -import numpy as np class mismipbasalforcings(object): - """ - MISMIP Basal Forcings class definition + """MISMIP Basal Forcings class definition Usage: - mismipbasalforcings = mismipbasalforcings() + mismipbasalforcings = mismipbasalforcings() """ def __init__(self): # {{{ - self.groundedice_melting_rate = float('NaN') - self.meltrate_factor = float('NaN') - self.threshold_thickness = float('NaN') - self.upperdepth_melt = float('NaN') - self.geothermalflux = float('NaN') + self.groundedice_melting_rate = np.nan + self.meltrate_factor = np.nan + self.threshold_thickness = np.nan + self.upperdepth_melt = np.nan + self.geothermalflux = np.nan self.setdefaultparameters() - #}}} def __repr__(self): # {{{ - string = " MISMIP + basal melt parameterization\n" - string = "%s\n%s" % (string, fielddisplay(self, "groundedice_melting_rate", "basal melting rate (positive if melting) [m / yr]")) - string = "%s\n%s" % (string, fielddisplay(self, "meltrate_factor", "Melt - rate rate factor [1 / yr] (sign is opposite to MISMIP + benchmark to remain consistent with ISSM convention of positive values for melting)")) - string = "%s\n%s" % (string, fielddisplay(self, "threshold_thickness", "Threshold thickness for saturation of basal melting [m]")) - string = "%s\n%s" % (string, fielddisplay(self, "upperdepth_melt", "Depth above which melt rate is zero [m]")) - string = "%s\n%s" % (string, fielddisplay(self, "geothermalflux", "Geothermal heat flux [W / m^2]")) - return string + s = ' MISMIP + basal melt parameterization\n' + s += '{}\n'.format(fielddisplay(self, "groundedice_melting_rate", "basal melting rate (positive if melting) [m / yr]")) + s += '{}\n'.format(fielddisplay(self, "meltrate_factor", "Melt - rate rate factor [1 / yr] (sign is opposite to MISMIP + benchmark to remain consistent with ISSM convention of positive values for melting)")) + s += '{}\n'.format(fielddisplay(self, "threshold_thickness", 
"Threshold thickness for saturation of basal melting [m]")) + s += '{}\n'.format(fielddisplay(self, "upperdepth_melt", "Depth above which melt rate is zero [m]")) + s += '{}\n'.format(fielddisplay(self, "geothermalflux", "Geothermal heat flux [W / m^2]")) + return s #}}} def extrude(self, md): # {{{ @@ -42,7 +41,7 @@ def initialize(self, md): # {{{ if np.all(np.isnan(self.groundedice_melting_rate)): self.groundedice_melting_rate = np.zeros((md.mesh.numberofvertices)) - print(' no basalforcings.groundedice_melting_rate specified: values set as zero') + print('no basalforcings.groundedice_melting_rate specified: values set as zero') if np.all(np.isnan(self.geothermalflux)): self.geothermalflux = np.zeros((md.mesh.numberofvertices)) print(" no basalforcings.geothermalflux specified: values set as zero") @@ -58,20 +57,18 @@ #}}} def checkconsistency(self, md, solution, analyses): # {{{ - #Early return - if 'MasstransportAnalysis' in analyses and not (solution == 'TransientSolution' and md.transient.ismasstransport == 0): + # Early return + if 'MasstransportAnalysis' in analyses and not solution == 'TransientSolution' and not md.transient.ismasstransport: md = checkfield(md, 'fieldname', 'basalforcings.groundedice_melting_rate', 'NaN', 1, 'Inf', 1, 'timeseries', 1) md = checkfield(md, 'fieldname', 'basalforcings.meltrate_factor', '>=', 0, 'numel', [1]) md = checkfield(md, 'fieldname', 'basalforcings.threshold_thickness', '>=', 0, 'numel', [1]) md = checkfield(md, 'fieldname', 'basalforcings.upperdepth_melt', '<=', 0, 'numel', [1]) - if 'BalancethicknessAnalysis' in analyses: md = checkfield(md, 'fieldname', 'basalforcings.groundedice_melting_rate', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices]) md = checkfield(md, 'fieldname', 'basalforcings.meltrate_factor', '>=', 0, 'numel', [1]) md = checkfield(md, 'fieldname', 'basalforcings.threshold_thickness', '>=', 0, 'numel', [1]) md = checkfield(md, 'fieldname', 'basalforcings.upperdepth_melt', '<=', 0, 'numel', [1]) - - if 'ThermalAnalysis' in analyses and not (solution == 'TransientSolution' and md.transient.isthermal == 0): + if 'ThermalAnalysis' in analyses and not (solution == 'TransientSolution' and not md.transient.isthermal): md = checkfield(md, 'fieldname', 'basalforcings.groundedice_melting_rate', 'NaN', 1, 'Inf', 1, 'timeseries', 1) md = checkfield(md, 'fieldname', 'basalforcings.meltrate_factor', '>=', 0, 'numel', [1]) md = checkfield(md, 'fieldname', 'basalforcings.threshold_thickness', '>=', 0, 'numel', [1]) @@ -84,7 +81,6 @@ yts = md.constants.yts if yts != 365.2422 * 24. * 3600.: print('WARNING: value of yts for MISMIP + runs different from ISSM default!') - WriteData(fid, prefix, 'name', 'md.basalforcings.model', 'data', 3, 'format', 'Integer') WriteData(fid, prefix, 'object', self, 'fieldname', 'groundedice_melting_rate', 'format', 'DoubleMat', 'name', 'md.basalforcings.groundedice_melting_rate', 'mattype', 1, 'scale', 1. 
/ yts, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts) WriteData(fid, prefix, 'object', self, 'fieldname', 'geothermalflux', 'name', 'md.basalforcings.geothermalflux', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts) Index: ../trunk-jpl/src/m/qmu/qmupart2npart.py =================================================================== --- ../trunk-jpl/src/m/qmu/qmupart2npart.py (revision 25687) +++ ../trunk-jpl/src/m/qmu/qmupart2npart.py (revision 25688) @@ -1,10 +1,6 @@ -import numpy as np - - def qmupart2npart(vector): # Vector is full of -1 (no partition) and 0 to npart. We need to identify # npart. + npart = int(vector.max() + 1) # cast to int as we may have a NumPy floating point type, which cannot be used as an argument to function range (see src/m/qmu/setupdesign/QmuSetupVariables.py) - npart = vector.max() + 1 - return npart Index: ../trunk-jpl/src/m/qmu/dakota_out_parse.m =================================================================== --- ../trunk-jpl/src/m/qmu/dakota_out_parse.m (revision 25687) +++ ../trunk-jpl/src/m/qmu/dakota_out_parse.m (revision 25688) @@ -89,12 +89,12 @@ fline=fgetl(fidi); [ntokens,tokens]=fltokens(fline); method=tokens{1}{1}; - display(sprintf('Dakota method =''%s''.',method)); + display(sprintf('Dakota method = ''%s''.',method)); elseif strcmp(tokens{1}{1}(7),'N') [fline]=findline(fidi,'methodName = '); [ntokens,tokens]=fltokens(fline); method=tokens{1}{3}; - display(sprintf('Dakota methodName=''%s''.',method)); + display(sprintf('Dakota methodName = ''%s''.',method)); end end Index: ../trunk-jpl/src/m/qmu/postqmu.py =================================================================== --- ../trunk-jpl/src/m/qmu/postqmu.py (revision 25687) +++ ../trunk-jpl/src/m/qmu/postqmu.py (revision 25688) @@ -1,3 +1,4 @@ +from copy import deepcopy from os import getpid, stat from os.path import isfile from subprocess import call @@ -4,7 +5,7 @@ from dakota_out_parse import * from helpers import * -from loadresultsfromdisk import * +import loadresultsfromdisk as loadresults def postqmu(md): @@ -47,16 +48,17 @@ if md.qmu.output and md.qmu.statistics.method[0]['name'] == 'None': if md.qmu.method.method == 'nond_sampling': dakotaresults.modelresults = [] - md2 = copy.deepcopy(md) + md2 = deepcopy(md) md2.qmu.isdakota = 0 for i in range(md2.qmu.method.params.samples): - print('reading qmu file {}.outbin.{}'.format(md2.miscellaneous.name, i)) - md2 = loadresultsfromdisk(md2, '{}.outbin.{}'.format(md2.miscellaneous.name, i)) + outbin_name = '{}.outbin.{}'.format(md2.miscellaneous.name, (i + 1)) + print('reading qmu file {}'.format(outbin_name)) + md2 = loadresults.loadresultsfromdisk(md2, outbin_name) dakotaresults.modelresults.append(md2.results) if md.qmu.statistics.method[0]['name'] != 'None': md.qmu.isdakota = 0 - md = loadresultsfromdisk(md, [md.miscellaneous.name,'.stats']) + md = loadresults.loadresultsfromdisk(md, '{}.stats'.format(md.miscellaneous.name)) md.qmu.isdakota = 1 # put dakotaresults in their right location. Index: ../trunk-jpl/src/m/qmu/qmupart2npart.m =================================================================== --- ../trunk-jpl/src/m/qmu/qmupart2npart.m (revision 25687) +++ ../trunk-jpl/src/m/qmu/qmupart2npart.m (revision 25688) @@ -1,8 +1,3 @@ function npart=qmupart2npart(vector) - - %vector is full of -1 (no partition) and 0 to npart. We need to - %identify npart - + %vector is full of -1 (no partition) and 0 to npart. 
We need to identify npart= npart=max(vector)+1; - - Index: ../trunk-jpl/src/m/qmu/dakota_out_parse.py =================================================================== --- ../trunk-jpl/src/m/qmu/dakota_out_parse.py (revision 25687) +++ ../trunk-jpl/src/m/qmu/dakota_out_parse.py (revision 25688) @@ -86,12 +86,12 @@ fline = fidi.readline() [ntokens, tokens] = fltokens(fline) method = tokens[0].strip() - print('Dakota method =\'' + method + '\'.') + print('Dakota method = \'' + method + '\'.') elif fline[6] in ['N', 'n']: fline = findline(fidi, 'methodName = ') [ntokens, tokens] = fltokens(fline) method = tokens[2].strip() - print('Dakota methodName = "' + method + '".') + print('Dakota methodName = \'' + method + '\'.') # loop through the file to find the function evaluation summary counter = 0 Index: ../trunk-jpl/src/m/qmu/setupdesign/QmuSetupVariables.py =================================================================== --- ../trunk-jpl/src/m/qmu/setupdesign/QmuSetupVariables.py (revision 25687) +++ ../trunk-jpl/src/m/qmu/setupdesign/QmuSetupVariables.py (revision 25688) @@ -1,4 +1,5 @@ from copy import deepcopy + from helpers import * from MatlabFuncs import * from normal_uncertain import * @@ -7,21 +8,23 @@ def QmuSetupVariables(md, variables): - #get descriptor + """QMUSETUPVARIABLES function + """ + + # Get descriptor descriptor = variables.descriptor - #decide whether this is a distributed variable, which will drive whether we expand it into npart values, - #or if we just carry it forward as is. + # Decide whether this is a distributed variable, which will drive whether we expand it into npart values, or if we just carry it forward as is + # Ok, key off according to type of descriptor dvar = [] - - #ok, key off according to type of descriptor: if strncmp(descriptor, 'scaled_', 7): - #we have a scaled variable, expand it over the partition. First recover the partition. + # We have a scaled variable, expand it over the partition. First recover the partition. partition = variables.partition - #figure out number of partitions + # Figure out number of partitions npart = qmupart2npart(partition) - #figure out number of time steps + + # Figure out number of time steps nt = variables.nsteps if isinstance(variables, uniform_uncertain): @@ -43,13 +46,12 @@ if nstddev != nt or nmean != nt: raise RuntimeError('QmuSetupVariables error message: stddev and mean fields should have the same number of cols as the number of time steps') - #ok, dealing with semi-discrete distributed variable. Distribute according to how many - #partitions we want, and number of time steps + # Ok, dealing with semi-discrete distributed variable. 
Distribute according to how many partitions we want, and number of time steps if nt == 1: for j in range(npart): dvar.append(deepcopy(variables)) - # text parsing in dakota requires literal "'identifier'" not just "identifier" + # Text parsing in Dakota requires literal "'identifier'" not just "identifier" dvar[-1].descriptor = "'" + str(variables.descriptor) + '_' + str(j + 1) + "'" if isinstance(variables, uniform_uncertain): @@ -63,7 +65,7 @@ for k in range(nt): dvar.append(deepcopy(variables)) - # text parsing in dakota requires literal "'identifier'" not just "identifier" + # Text parsing in Dakota requires literal "'identifier'" not just "identifier" dvar[-1].descriptor = "'" + str(variables.descriptor) + '_' + str(j + 1) + '_' + str(k + 1) + "'" if isinstance(variables, uniform_uncertain): @@ -75,7 +77,7 @@ else: dvar.append(deepcopy(variables)) - # text parsing in dakota requires literal "'identifier'" not just "identifier" + # text parsing in Dakota requires literal "'identifier'" not just "identifier" dvar[-1].descriptor = "'" + str(variables.descriptor) + "'" return dvar Index: ../trunk-jpl/src/m/qmu/preqmu.py =================================================================== --- ../trunk-jpl/src/m/qmu/preqmu.py (revision 25687) +++ ../trunk-jpl/src/m/qmu/preqmu.py (revision 25688) @@ -10,7 +10,7 @@ def preqmu(md, options): - """QMU - apply Quantification of Margins and Uncertainties techniques + """PREQMU - apply Quantification of Margins and Uncertainties techniques to a solution sequence (like stressbalance.py, progonstic.py, etc ...), using the Dakota software from Sandia. @@ -35,24 +35,25 @@ options.addfielddefault('imethod', 0) options.addfielddefault('iparams', 0) - # when running in library mode, the in file needs to be called md.miscellaneous.name.qmu.in + # When running in library mode, the in file needs to be called md.miscellaneous.name.qmu.in qmufile = md.miscellaneous.name - # retrieve variables and resposnes for this particular analysis. + # Retrieve variables and resposnes for this particular analysis. #print type(md.qmu.variables) #print md.qmu.variables.__dict__ - #print ivar + # Print ivar variables = md.qmu.variables #[ivar] responses = md.qmu.responses #[iresp] - # expand variables and responses + # Expand variables and responses #print variables.__dict__ #print responses.__dict__ variables = expandvariables(md, variables) responses = expandresponses(md, responses) - # go through variables and responses, and check they don't have more than - # md.qmu.numberofpartitions values. Also determine numvariables and numresponses + # Go through variables and responses, and check they don't have more than + # md.qmu.numberofpartitions values. Also determine numvariables and + # numresponses #{{{ numvariables = 0 variable_fieldnames = fieldnames(variables) @@ -81,11 +82,11 @@ numresponses = numresponses + np.size(vars(responses)[field_name]) #}}} - # create in file for dakota + # Create in file for Dakota #dakota_in_data(md.qmu.method[imethod], variables, responses, md.qmu.params[iparams], qmufile) dakota_in_data(md.qmu.method, variables, responses, md.qmu.params, qmufile) - # build a list of variables and responses descriptors. the list is not expanded. + # Build a list of variables and responses descriptors. the list is not expanded. 
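The partition bookkeeping below relies on the qmupart2npart helper revised earlier in this changeset. A minimal standalone sketch of that helper with an illustrative partition vector; the int cast mirrors the revision's fix so the result can be passed to range():

import numpy as np

def qmupart2npart(vector):
    # The partition vector holds -1 (no partition) or a partition id in 0..npart-1,
    # so the number of partitions is max(vector) + 1. Cast to int because a NumPy
    # floating point value cannot be used as an argument to range().
    return int(np.asarray(vector).max() + 1)

# Illustrative vector: two partitions (ids 0 and 1) plus unpartitioned entries (-1)
partition = np.array([-1., 0., 1., 1., -1., 0.])
assert qmupart2npart(partition) == 2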
#{{{ variabledescriptors = [] # variable_fieldnames = fieldnames(md.qmu.variables[ivar]) @@ -114,7 +115,7 @@ responsedescriptors.append(fieldresponses.descriptor) #}}} - #build a list of variable partitions + # Build a list of variable partitions variablepartitions = [] variablepartitions_npart = [] variablepartitions_nt = [] @@ -124,10 +125,13 @@ fieldvariable = vars(md.qmu.variables)[field_name] if type(fieldvariable) in [list, np.ndarray]: for j in range(np.size(fieldvariable)): - if fieldvariable[j].isscaled(): + if fieldvariable[j].isscaled() or fieldvariable[j].isdistributed(): variablepartitions.append(fieldvariable[j].partition) variablepartitions_npart.append(qmupart2npart(fieldvariable[j].partition)) - variablepartitions_nt.append(fieldvariable[j].nsteps) + if hasattr(fieldvariable[j], 'nsteps'): + variablepartitions_nt.append(fieldvariable[j].nsteps) + else: + variablepartitions_nt.append(1) else: variablepartitions.append([]) variablepartitions_npart.append(0) @@ -136,15 +140,18 @@ if fieldvariable.isscaled(): variablepartitions.append(fieldvariable.partition) variablepartitions_npart.append(qmupart2npart(fieldvariable.partition)) - variablepartitions_nt.append(fieldvariable.nsteps) + if hasattr(fieldvariable, 'nsteps'): + variablepartitions_nt.append(fieldvariable.nsteps) + else: + variablepartitions_nt.append(1) else: variablepartitions.append([]) variablepartitions_npart.append(0) variablepartitions_nt.append(1) - #build a list of response partitions + # Build a list of response partitions responsepartitions = [] - responsepartitions_npart = [] + responsepartitions_npart = np.array([]) response_fieldnames = fieldnames(md.qmu.responses) for i in range(len(response_fieldnames)): field_name = response_fieldnames[i] @@ -153,19 +160,22 @@ for j in range(np.size(fieldresponses)): if fieldresponse[j].isscaled(): responsepartitions.append(fieldresponse[j].partition) - responsepartitions_npart.append(qmupart2npart(fieldresponse[j].partition)) + responsepartitions_npart = np.append(responsepartitions_npart, qmupart2npart(fieldresponse[j].partition)) else: responsepartitions.append([]) - responsepartitions_npart.append(0) + responsepartitions_npart = np.append(responsepartitions_npart, 0) else: if fieldresponse.isscaled(): responsepartitions.append(fieldresponse.partition) - responsepartitions_npart.append(qmupart2npart(fieldresponse.partition)) + responsepartitions_npart = np.append(responsepartitions_npart, qmupart2npart(fieldresponse.partition)) else: responsepartitions.append([]) - responsepartitions_npart.append(0) + responsepartitions_npart = np.append(responsepartitions_npart, 0) - # register the fields that will be needed by the Qmu model. + if responsepartitions_npart.shape[0] != 1: + responsepartitions_npart = responsepartitions_npart.reshape(1, -1) + + # Register the fields that will be needed by the Qmu model. md.qmu.numberofresponses = numresponses md.qmu.variabledescriptors = variabledescriptors md.qmu.variablepartitions = variablepartitions @@ -175,9 +185,9 @@ md.qmu.responsepartitions = responsepartitions md.qmu.responsepartitions_npart = responsepartitions_npart - # now, we have to provide all the info necessary for the solutions to compute the - # responses. For ex, if mass_flux is a response, we need a profile of points. - # For a misfit, we need the observed velocity, etc ... + # Now, we have to provide all the info necessary for the solutions to + # compute the responses. For example, if mass_flux is a response, we need a + # profile of points. 
For a misfit, we need the observed velocity, etc. md = process_qmu_response_data(md) return md Index: ../trunk-jpl/src/m/qmu/preqmu.m =================================================================== --- ../trunk-jpl/src/m/qmu/preqmu.m (revision 25687) +++ ../trunk-jpl/src/m/qmu/preqmu.m (revision 25688) @@ -100,7 +100,7 @@ if fieldvariable.isscaled() | fieldvariable.isdistributed(); variablepartitions{end+1}=fieldvariable.partition; variablepartitions_npart(end+1)=qmupart2npart(fieldvariable.partition); - if isfield(struct(fieldvariable),'nsteps'), + if isprop(fieldvariable,'nsteps'), variablepartitions_nt(end+1)=fieldvariable.nsteps; else variablepartitions_nt(end+1)=1; Index: ../trunk-jpl/src/m/solve/marshall.py =================================================================== --- ../trunk-jpl/src/m/solve/marshall.py (revision 25687) +++ ../trunk-jpl/src/m/solve/marshall.py (revision 25688) @@ -4,15 +4,15 @@ def marshall(md): - ''' - MARSHALL - outputs a compatible binary file from @model md, for certain solution type. + """MARSHALL - outputs a compatible binary file from @model md, for certain solution type. - The routine creates a compatible binary file from @model md - This binary file will be used for parallel runs in JPL-package + The routine creates a compatible binary file from @model md + his binary file will be used for parallel runs in JPL-package - Usage: - marshall(md) - ''' + Usage: + marshall(md) + """ + if md.verbose.solution: print("marshalling file {}.bin".format(md.miscellaneous.name)) @@ -22,7 +22,9 @@ except IOError as e: raise IOError("marshall error message: could not open '%s.bin' file for binary writing. Due to: ".format(md.miscellaneous.name), e) - for field in md.properties(): + fields = md.properties() + fields.sort() # sort fields so that comparison of binary files is easier + for field in fields: #Some properties do not need to be marshalled if field in ['results', 'radaroverlay', 'toolkits', 'cluster', 'private']: continue @@ -41,9 +43,11 @@ #close file try: fid.close() - # # Uncomment the following to make a copy of the binary input file for - # # debugging purposes (can be fed into scripts/ReadBin.py). - copy_cmd = "cp {}.bin {}.py.bin".format(md.miscellaneous.name, md.miscellaneous.name) - subprocess.call(copy_cmd, shell=True) + + # Uncomment the following to make a copy of the binary input file for + # debugging purposes (can be fed into scripts/ReadBin.py). 
+ # copy_cmd = "cp {}.bin {}.py.bin".format(md.miscellaneous.name, md.miscellaneous.name) + # subprocess.call(copy_cmd, shell=True) + except IOError as e: print("marshall error message: could not close file '{}.bin' due to:".format(md.miscellaneous.name), e) Index: ../trunk-jpl/src/m/solve/marshall.m =================================================================== --- ../trunk-jpl/src/m/solve/marshall.m (revision 25687) +++ ../trunk-jpl/src/m/solve/marshall.m (revision 25688) @@ -17,8 +17,8 @@ error(['marshall error message: could not open ' [md.miscellaneous.name '.bin'],' file for binary writing']); end -%Go through all model fields: check that it is a class and call checkconsistency -fields=properties('model'); +% Go through all model fields: check that it is a class and call checkconsistency +fields=sort(properties('model')); %sort fields so that comparison of binary files is easier for i=1:length(fields), field=fields{i}; @@ -42,9 +42,11 @@ %close file st=fclose(fid); -% % Uncomment the following to make a copy of the binary input file for debugging -% % purposes (can be fed into scripts/ReadBin.py). -copyfile([md.miscellaneous.name '.bin'], [md.miscellaneous.name '.m.bin']) + +% Uncomment the following to make a copy of the binary input file for debugging +% purposes (can be fed into scripts/ReadBin.py). +% copyfile([md.miscellaneous.name '.bin'], [md.miscellaneous.name '.m.bin']) + if st==-1, error(['marshall error message: could not close file ' [md.miscellaneous.name '.bin']]); end Index: ../trunk-jpl/src/m/solve/WriteData.py =================================================================== --- ../trunk-jpl/src/m/solve/WriteData.py (revision 25687) +++ ../trunk-jpl/src/m/solve/WriteData.py (revision 25688) @@ -163,10 +163,10 @@ fid.write(pack('i', 1)) for i in range(s[0]): if np.ndim(data) == 1: - fid.write(pack('d', float(data[i]))) #get to the "c" convention, hence the transpose + fid.write(pack('d', float(data[i]))) else: for j in range(s[1]): - fid.write(pack('d', float(data[i][j]))) #get to the "c" convention, hence the transpose + fid.write(pack('d', float(data[i][j]))) # }}} elif datatype == 'DoubleMat': # {{{ @@ -351,11 +351,10 @@ def FormatToCode(datatype): # {{{ + """ FORMATTOCODE - This routine takes the datatype string, and hardcodes it + into an integer, which is passed along the record, in order to identify the + nature of the dataset being sent. """ - This routine takes the datatype string, and hardcodes it into an integer, which - is passed along the record, in order to identify the nature of the dataset being - sent. - """ if datatype == 'Boolean': code = 1 elif datatype == 'Integer': Index: ../trunk-jpl/src/m/solve/loadresultsfromcluster.py =================================================================== --- ../trunk-jpl/src/m/solve/loadresultsfromcluster.py (revision 25687) +++ ../trunk-jpl/src/m/solve/loadresultsfromcluster.py (revision 25688) @@ -42,8 +42,8 @@ filelist.append('dakota_tabular.dat') if md.qmu.output and md.qmu.statistics.method[0]['name'] == 'None': if md.qmu.method.method == 'nond_sampling': - for i in range(len(md.qmu.method.params.samples)): - filelist.append(md.miscellaneous.name + '.outbin' + str(i)) + for i in range(md.qmu.method.params.samples): + filelist.append(md.miscellaneous.name + '.outbin.' 
+ str(i + 1)) if md.qmu.statistics.method[0]['name'] != 'None': filelist.append(md.miscellaneous.name + '.stats') else: Index: ../trunk-jpl/src/m/miscellaneous/pretty_print.m =================================================================== --- ../trunk-jpl/src/m/miscellaneous/pretty_print.m (revision 25687) +++ ../trunk-jpl/src/m/miscellaneous/pretty_print.m (revision 25688) @@ -14,64 +14,57 @@ % large data structure (default is length of 6). % - Add an argument that allows the user to specify the number of values that % they would like to display from the head and tail of each dimension of -% 'data'. +% 'data' (default is 3 from each end). +% - Add an argument that allows the user to designate the number of +% significant figures to print for each floating point value (default is 8). if ndims(data)==1 if length(data)>6 - disp(sprintf('[%d %d %d ... %d %d %d]',data(1),data(2),data(3),data(end-2),data(end-1),data(end))); + output=sprintf('[%.8f %.8f %.8f ... %.8f %.8f %.8f]',data(1),data(2),data(3),data(end-2),data(end-1),data(end)); else - disp(data); + output=sprintf('%.8f',data); end elseif ndims(data)==2 shape=size(data); if shape(1)>6 - % if shape(2)==1 - % disp('NOTE: Single column printed as row!'); - % disp(sprintf('[%d %d %d ... %d %d %d]',data(1,1),data(2,1),data(3,1),data(end-2,1),data(end-1,1),data(end,1))); - % else if shape(2)>6 - disp('['); - disp(sprintf('[%d %d %d ... %d %d %d]',data(1,1),data(1,2),data(1,3),data(1,end-2),data(1,end-1),data(1,end))); - disp(sprintf('[%d %d %d ... %d %d %d]',data(2,1),data(2,2),data(2,3),data(2,end-2),data(2,end-1),data(2,end))); - disp(sprintf('[%d %d %d ... %d %d %d]',data(3,1),data(3,2),data(3,3),data(3,end-2),data(3,end-1),data(3,end))); - disp('...'); - disp(sprintf('[%d %d %d ... %d %d %d]',data(end-2,1),data(end-2,2),data(end-2,3),data(end-2,end-2),data(end-2,end-1),data(end-2,end))); - disp(sprintf('[%d %d %d ... %d %d %d]',data(end-1,1),data(end-1,2),data(end-1,3),data(end-1,end-2),data(end-1,end-1),data(end-1,end))); - disp(sprintf('[%d %d %d ... %d %d %d]',data(end,1),data(end,2),data(end,3),data(end,end-2),data(end,end-1),data(end,end))); - disp(sprintf(']\n')); + output=sprintf('[[%.8f %.8f %.8f ... %.8f %.8f %.8f]\n',data(1,1),data(1,2),data(1,3),data(1,end-2),data(1,end-1),data(1,end)); + output=sprintf('%s [%.8f %.8f %.8f ... %.8f %.8f %.8f]\n',data(2,1),data(2,2),data(2,3),data(2,end-2),data(2,end-1),data(2,end)); + output=sprintf('%s [%.8f %.8f %.8f ... %.8f %.8f %.8f]\n',data(3,1),data(3,2),data(3,3),data(3,end-2),data(3,end-1),data(3,end)); + output=sprintf('%s ...\n',output); + output=sprintf('%s [%.8f %.8f %.8f ... %.8f %.8f %.8f]\n',data(end-2,1),data(end-2,2),data(end-2,3),data(end-2,end-2),data(end-2,end-1),data(end-2,end)); + output=sprintf('%s [%.8f %.8f %.8f ... %.8f %.8f %.8f]\n',data(end-1,1),data(end-1,2),data(end-1,3),data(end-1,end-2),data(end-1,end-1),data(end-1,end)); + output=sprintf('%s [%.8f %.8f %.8f ... 
%.8f %.8f %.8f]]',data(end,1),data(end,2),data(end,3),data(end,end-2),data(end,end-1),data(end,end)); else - disp('['); - disp(data(1,:)); - disp(data(2,:)); - disp(data(3,:)); - disp('...'); - disp(data(end-2,:)); - disp(data(end-1,:)); - disp(data(end,:)); - disp(sprintf(']\n')); + output=sprintf('[[%.8f]\n',data(1,:)); + output=sprintf('%s [%.8f]\n',output,data(2,:)); + output=sprintf('%s [%.8f]\n',output,data(3,:)); + output=sprintf('%s ...\n',output); + output=sprintf('%s [%.8f]\n',output,data(end-2,:)); + output=sprintf('%s [%.8f]\n',output,data(end-1,:)); + output=sprintf('%s [%.8f]]',output,data(end,:)); end else - % if shape(2)==1 - % data=transpose(data) - % sprintf('% NOTE: Single column printed as row!'); - % % Get shape of transposed structure - % shape=size(data); - % if shape(2)>6 - % disp(sprintf('[%d %d %d ... %d %d %d]',data(1,1),data(2,1),data(3,1),data(end-2,1),data(end-1,1),data(end,1))); - % else - % disp(data); - % end - % else if shape(2)>6 - disp('['); for i=1:shape(1) - disp(sprintf('[%d %d %d ... %d %d %d]',data(i,1),data(i,2),data(i,3),data(i,end-2),data(i,end-1),data(i,end))); + if i==1 + output='['; + else + output=sprintf('%s ',output); + end + + output=sprintf('%s[%.8f %.8f %.8f ... %.8f %.8f %.8f]',output,data(i,1),data(i,2),data(i,3),data(i,end-2),data(i,end-1),data(i,end)); + + if i==shape(1) + output=sprintf('%s]',output); + end end - disp(sprintf(']\n')); else - disp(data); + output=sprintf('%.8f',data); end end else - disp(data); + output=sprintf('%.8f',data); end + +disp(output); Index: ../trunk-jpl/src/c/modules/FrontalForcingsx =================================================================== --- ../trunk-jpl/src/c/modules/FrontalForcingsx (revision 25687) +++ ../trunk-jpl/src/c/modules/FrontalForcingsx (revision 25688) Property changes on: ../trunk-jpl/src/c/modules/FrontalForcingsx ___________________________________________________________________ Added: svn:ignore ## -0,0 +1,2 ## +.deps +.dirstamp Index: ../trunk-jpl/src/c/modules/OceanExchangeDatax =================================================================== --- ../trunk-jpl/src/c/modules/OceanExchangeDatax (revision 25687) +++ ../trunk-jpl/src/c/modules/OceanExchangeDatax (revision 25688) Property changes on: ../trunk-jpl/src/c/modules/OceanExchangeDatax ___________________________________________________________________ Added: svn:ignore ## -0,0 +1 ## +.deps Index: ../trunk-jpl/src/c/modules/QmuStatisticsx/QmuStatisticsx.cpp =================================================================== --- ../trunk-jpl/src/c/modules/QmuStatisticsx/QmuStatisticsx.cpp (revision 25687) +++ ../trunk-jpl/src/c/modules/QmuStatisticsx/QmuStatisticsx.cpp (revision 25688) @@ -1000,7 +1000,7 @@ } /*}}}*/ } - _printf0_("Done with MeanVariance :\n"); + _printf0_("Done with MeanVariance:\n"); IssmComm::SetComm(ISSM_MPI_COMM_WORLD); } /*}}}*/ @@ -1160,7 +1160,7 @@ } } } - _printf0_("Done with SampleSeries :\n"); + _printf0_("Done with SampleSeries:\n"); IssmComm::SetComm(ISSM_MPI_COMM_WORLD); } /*}}}*/ Index: ../trunk-jpl/src/c/modules/QmuStatisticsx =================================================================== --- ../trunk-jpl/src/c/modules/QmuStatisticsx (revision 25687) +++ ../trunk-jpl/src/c/modules/QmuStatisticsx (revision 25688) Property changes on: ../trunk-jpl/src/c/modules/QmuStatisticsx ___________________________________________________________________ Added: svn:ignore ## -0,0 +1,2 ## +.deps +.dirstamp Index: ../trunk-jpl/src/c/modules/KillIcebergsx 
=================================================================== --- ../trunk-jpl/src/c/modules/KillIcebergsx (revision 25687) +++ ../trunk-jpl/src/c/modules/KillIcebergsx (revision 25688) Property changes on: ../trunk-jpl/src/c/modules/KillIcebergsx ___________________________________________________________________ Added: svn:ignore ## -0,0 +1,2 ## +.dirstamp +.deps Index: ../trunk-jpl/src/c/toolkits/codipack =================================================================== --- ../trunk-jpl/src/c/toolkits/codipack (revision 25687) +++ ../trunk-jpl/src/c/toolkits/codipack (revision 25688) Property changes on: ../trunk-jpl/src/c/toolkits/codipack ___________________________________________________________________ Added: svn:ignore ## -0,0 +1 ## +.deps Index: ../trunk-jpl/src/c/classes/Inputs =================================================================== --- ../trunk-jpl/src/c/classes/Inputs (revision 25687) +++ ../trunk-jpl/src/c/classes/Inputs (revision 25688) Property changes on: ../trunk-jpl/src/c/classes/Inputs ___________________________________________________________________ Added: svn:ignore ## -0,0 +1,2 ## +.deps +.dirstamp Index: ../trunk-jpl/src/c/classes/Dakota =================================================================== --- ../trunk-jpl/src/c/classes/Dakota (revision 25687) +++ ../trunk-jpl/src/c/classes/Dakota (revision 25688) Property changes on: ../trunk-jpl/src/c/classes/Dakota ___________________________________________________________________ Modified: svn:ignore ## -1 +1,2 ## .deps +.dirstamp Index: ../trunk-jpl/src/c =================================================================== --- ../trunk-jpl/src/c (revision 25687) +++ ../trunk-jpl/src/c (revision 25688) Property changes on: ../trunk-jpl/src/c ___________________________________________________________________ Modified: svn:ignore ## -22,3 +22,4 ## issm_slr issm_ocean issm_dakota +issm_post Index: ../trunk-jpl/src/wrappers/CoordTransform =================================================================== --- ../trunk-jpl/src/wrappers/CoordTransform (revision 25687) +++ ../trunk-jpl/src/wrappers/CoordTransform (revision 25688) Property changes on: ../trunk-jpl/src/wrappers/CoordTransform ___________________________________________________________________ Added: svn:ignore ## -0,0 +1 ## +.deps Index: ../trunk-jpl/test/NightlyRun/test2006.m =================================================================== --- ../trunk-jpl/test/NightlyRun/test2006.m (revision 25687) +++ ../trunk-jpl/test/NightlyRun/test2006.m (revision 25688) @@ -52,7 +52,6 @@ %Materials: md.materials=materials('hydro'); - %Miscellaneous md.miscellaneous.name='test2006'; @@ -137,43 +136,44 @@ end end % }}} - %algorithm: % {{{ - md.qmu.method =dakota_method('nond_samp'); - md.qmu.method(end)=dmeth_params_set(md.qmu.method(end),... - 'seed',1234,... - 'samples',10,... 
- 'sample_type','random'); - md.qmu.output=1; - %}}} - %parameters % {{{ - md.qmu.params.direct=true; - md.qmu.params.interval_type='forward'; - md.qmu.params.analysis_driver='matlab'; - md.qmu.params.evaluation_scheduling='master'; - md.qmu.params.processors_per_evaluation=2; - md.qmu.params.tabular_graphics_data=true; - md.qmu.isdakota=1; - md.verbose=verbose(0); md.verbose.qmu=1; - % }}} - %qmu statistics %{{{ - md.qmu.statistics.nfiles_per_directory=2; - md.qmu.statistics.ndirectories=5; - - md.qmu.statistics.method(1).name='Histogram'; - md.qmu.statistics.method(1).fields={'Sealevel','BslrIce'}; - md.qmu.statistics.method(1).steps=1:10; - md.qmu.statistics.method(1).nbins=20; - md.qmu.statistics.method(2).name='MeanVariance'; - md.qmu.statistics.method(2).fields={'Sealevel','BslrIce'}; - md.qmu.statistics.method(2).steps=[1:10]; +%algorithm: % {{{ +md.qmu.method =dakota_method('nond_samp'); +md.qmu.method(end)=dmeth_params_set(md.qmu.method(end),... +'seed',1234,... +'samples',10,... +'sample_type','random'); +md.qmu.output=1; +%}}} +%parameters % {{{ +md.qmu.params.direct=true; +md.qmu.params.interval_type='forward'; +md.qmu.params.analysis_driver='matlab'; +md.qmu.params.evaluation_scheduling='master'; +md.qmu.params.processors_per_evaluation=2; +md.qmu.params.tabular_graphics_data=true; +md.qmu.isdakota=1; +md.verbose=verbose(0); md.verbose.qmu=1; +% }}} +%qmu statistics %{{{ +md.qmu.statistics.nfiles_per_directory=2; +md.qmu.statistics.ndirectories=5; - md.qmu.statistics.method(3).name='SampleSeries'; - md.qmu.statistics.method(3).fields={'Sealevel','BslrIce'}; - md.qmu.statistics.method(3).steps=[1:10]; - md.qmu.statistics.method(3).indices=locations; - %}}} +md.qmu.statistics.method(1).name='Histogram'; +md.qmu.statistics.method(1).fields={'Sealevel','BslrIce'}; +md.qmu.statistics.method(1).steps=[1:10]; +md.qmu.statistics.method(1).nbins=20; +md.qmu.statistics.method(2).name='MeanVariance'; +md.qmu.statistics.method(2).fields={'Sealevel','BslrIce'}; +md.qmu.statistics.method(2).steps=[1:10]; + +md.qmu.statistics.method(3).name='SampleSeries'; +md.qmu.statistics.method(3).fields={'Sealevel','BslrIce'}; +md.qmu.statistics.method(3).steps=[1:10]; +md.qmu.statistics.method(3).indices=locations; +%}}} + %run transient dakota solution: mds=solve(md,'Transient'); @@ -181,7 +181,9 @@ md.qmu.statistics.method(1).name='None'; md=solve(md,'Transient'); -%compare statistics with our own here: +disp(mds.results.StatisticsSolution) + +%compare statistics with our own here: svalues=mds.results.StatisticsSolution(end).SealevelSamples; %all values at locations. 
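The Python version of this test, added later in this changeset, performs the same comparison by stacking the per-sample sea level values and taking a Frobenius norm. A condensed sketch with placeholder arrays standing in for the model results:

import numpy as np

nsamples = 10
locations = [1, 5, 10, 15, 20]

# Placeholders for the StatisticsSolution SealevelSamples and the per-sample
# TransientSolution Sealevel values gathered from md.results.dakota
svalues = np.zeros((nsamples, len(locations)))
dvalues = np.zeros((nsamples, len(locations)))
# for i in range(nsamples):
#     dvalues[i, :] = md.results.dakota.modelresults[i].TransientSolution[-1].Sealevel[locations].flatten()

samplesnorm = np.linalg.norm(dvalues - svalues, 'fro')
print(samplesnorm)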
dvalues=zeros(md.qmu.method.params.samples,length(locations)); @@ -189,6 +191,9 @@ dvalues(i,:)=md.results.dakota.modelresults{i}.TransientSolution(end).Sealevel(locations); end +disp(dvalues) +disp(size(dvalues)) + samplesnorm=norm(dvalues-svalues,'fro'); %Fields and tolerances to track changes Index: ../trunk-jpl/test/NightlyRun/test2006.py =================================================================== --- ../trunk-jpl/test/NightlyRun/test2006.py (nonexistent) +++ ../trunk-jpl/test/NightlyRun/test2006.py (revision 25688) @@ -0,0 +1,236 @@ +#Test Name: EarthSlr Dakota Sampling glaciers +import numpy as np + +from dmeth_params_set import * +from gmshplanet import * +from gmtmask import * +from lovenumbers import * +from materials import * +from model import * +from nodalvalue import * +from normal_uncertain import * +from response_function import * +from solve import * + + +# Mesh earth +md = model() +md.cluster = generic('name', oshostname(), 'np', 5) +md.mesh = gmshplanet('radius', 6.371012 * 1e3, 'resolution', 700.) #700 km resolution mesh + +# Parameterize solidearth solution +# Solidearth loading #{{{ +md.solidearth.surfaceload.icethicknesschange = np.zeros((md.mesh.numberofelements, 1)) +md.solidearth.sealevel = np.zeros((md.mesh.numberofvertices, 1)) +md.dsl.global_average_thermosteric_sea_level_change = np.zeros((2, 1)) +md.dsl.sea_surface_height_change_above_geoid = np.zeros((md.mesh.numberofvertices + 1, 1)) +md.dsl.sea_water_pressure_change_at_sea_floor = np.zeros((md.mesh.numberofvertices + 1, 1)) + +# Antarctica +late = md.mesh.lat[md.mesh.elements - 1].sum(axis=1) / 3 +longe = md.mesh.long[md.mesh.elements - 1].sum(axis=1) / 3 +pos = np.where(late < -80)[0] +md.solidearth.surfaceload.icethicknesschange[pos] = -100 +# Greenland +pos = np.where(np.logical_and.reduce((late > 70, late < 80, longe > -60, longe < -30)))[0] +md.solidearth.surfaceload.icethicknesschange[pos] = -100 + +# Elastic loading from love numbers +md.solidearth.lovenumbers = lovenumbers('maxdeg', 100) +#}}} + +# Mask #{{{ +mask = gmtmask(md.mesh.lat, md.mesh.long) +icemask = np.ones((md.mesh.numberofvertices, 1)) +pos = np.where(mask == 0)[0] +icemask[pos] = -1 +pos = np.where(mask[md.mesh.elements - 1].sum(axis=1) < 3)[0] +icemask[md.mesh.elements[pos, :] - 1] = -1 +md.mask.ice_levelset = icemask +md.mask.ocean_levelset = -icemask + +# Make sure that the elements that have loads are fully grounded +pos = np.nonzero(md.solidearth.surfaceload.icethicknesschange)[0] +md.mask.ocean_levelset[md.mesh.elements[pos, :] - 1] = 1 + +# Make sure wherever there is an ice load, that the mask is set to ice: +#pos = np.nonzero(md.solidearth.surfaceload.icethicknesschange)[0] # TODO: Do we need to do this twice? +md.mask.ice_levelset[md.mesh.elements[pos, :] - 1] = -1 +# }}} + +md.solidearth.settings.ocean_area_scaling = 0 + +# Geometry for the bed; arbitrary +md.geometry.bed = -np.ones((md.mesh.numberofvertices, 1)) + +# Materials +md.materials = materials('hydro') + +# Miscellaneous +md.miscellaneous.name = 'test2006' + +# Solution parameters +md.solidearth.settings.reltol = np.nan +md.solidearth.settings.abstol = 1e-3 +md.solidearth.settings.computesealevelchange = 1 + +# Max number of iterations reverted back to 10 (i.e. 
the original default value) +md.solidearth.settings.maxiter = 10 + +# Eustatic + rigid + elastic + rotation run +md.solidearth.settings.rigid = 1 +md.solidearth.settings.elastic = 1 +md.solidearth.settings.rotation = 1 + +# Transient settings +md.timestepping.start_time = 0 +md.timestepping.final_time = 10 +md.timestepping.time_step = 1 +md.transient.isslr = 1 +md.transient.issmb = 0 +md.transient.isgia = 1 +md.transient.ismasstransport = 0 +md.transient.isstressbalance = 0 +md.transient.isthermal = 0 +dh = np.asarray(md.solidearth.surfaceload.icethicknesschange).T +deltathickness = np.zeros((md.mesh.numberofelements + 1, 10)) +for i in range(10): + deltathickness[0:-1, i] = dh * (i + 1) +deltathickness[-1, :] = np.arange(0, 10, 1) +md.solidearth.surfaceload.icethicknesschange = deltathickness + +# Hack +md.geometry.surface = np.zeros((md.mesh.numberofvertices, 1)) +md.geometry.thickness = np.ones((md.mesh.numberofvertices, 1)) +md.geometry.base = -np.ones((md.mesh.numberofvertices, 1)) +md.geometry.bed = md.geometry.base + +# Uncertainty quantification +# Ice sheets #{{{ +npart = 1 +nt = 1 +partition = -np.ones((md.mesh.numberofelements, 1)) +pos = np.where(late < -80)[0] +partition[pos] = 0 +pos = np.where(np.logical_and.reduce((late > 70, late < 80, longe > -60, longe < -30)))[0] +partition[pos] = 0 + +# Variables +qmuvar = OrderedStruct() +qmuvar.surfaceload = normal_uncertain.normal_uncertain( + 'descriptor', 'scaled_SurfaceloadIceThicknessChange', + 'mean', 1 * np.ones((npart, nt)), + 'stddev', 1 * np.ones((npart, nt)), # 10% standard deviation + 'partition', partition, + 'transient', 'on', + 'nsteps', nt +) +#}}} + +# Correlation +md.qmu.correlation_matrix = [] + +# Variables final declaration +md.qmu.variables = OrderedStruct() +md.qmu.variables.surfaceload = qmuvar.surfaceload + +locations = [1, 5, 10, 15, 20] + +# Responses #{{{ +md.qmu.responses.sealevel1 = response_function.response_function('descriptor', 'Outputdefinition1') +md.qmu.responses.sealevel2 = response_function.response_function('descriptor', 'Outputdefinition2') +md.qmu.responses.sealevel3 = response_function.response_function('descriptor', 'Outputdefinition3') +md.qmu.responses.sealevel4 = response_function.response_function('descriptor', 'Outputdefinition4') +md.qmu.responses.sealevel5 = response_function.response_function('descriptor', 'Outputdefinition5') + +# Output definitions +for i in range(len(locations)): + if i == 0: + md.outputdefinition.definitions = [ + nodalvalue( + 'name', 'SNode', + 'definitionstring', 'Outputdefinition1', + 'model_string', 'Sealevel', + 'node', locations[i] + ) + ] + else: + md.outputdefinition.definitions.append( + nodalvalue( + 'name', 'SNode', + 'definitionstring', 'Outputdefinition' + str(i + 1), + 'model_string', 'Sealevel', + 'node', locations[i] + ) + ) +#}}} + +# Algorithm #{{{ +md.qmu.method = dakota_method.dakota_method('nond_samp') +md.qmu.method = dmeth_params_set( + md.qmu.method, + 'seed', 1234, + 'samples', 10, + 'sample_type', 'random' +) +md.qmu.output = 1 +#}}} +# Parameters #{{{ +md.qmu.params.direct = True +md.qmu.params.interval_type = 'forward' +md.qmu.params.analysis_driver = 'matlab' +md.qmu.params.evaluation_scheduling = 'master' +md.qmu.params.processors_per_evaluation = 2 +md.qmu.params.tabular_graphics_data = True +md.qmu.isdakota = 1 +md.verbose = verbose(0) +md.verbose.qmu = 1 +#}}} +# QMU statistics #{{{ + +# TODO: Abstract reshaping of arrays away to src/m/classes/qmustatistics.py::marshall or src/m/solve/WriteData.py +# 
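The TODO above refers to the 1 x n row shape expected for the statistics steps and indices arrays configured below. A sketch of the kind of helper the TODO suggests; as_row is hypothetical and not part of ISSM:

import numpy as np

def as_row(a):
    # Hypothetical helper: coerce a 1-D steps/indices array into the 1 x n row
    # shape used by the statistics configuration below, which is the reshaping
    # the TODO suggests moving into the marshalling code.
    return np.asarray(a).reshape(1, -1)

steps = as_row(np.arange(1, 10 + 1))    # shape (1, 10)
indices = as_row([1, 5, 10, 15, 20])    # shape (1, 5)
print(steps.shape, indices.shape)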
+md.qmu.statistics.nfiles_per_directory = 2 +md.qmu.statistics.ndirectories = 5 + +md.qmu.statistics.method[0]['name'] = 'Histogram' +md.qmu.statistics.method[0]['fields'] = ['Sealevel', 'BslrIce'] +md.qmu.statistics.method[0]['steps'] = np.arange(1, 10 + 1).reshape(1, -1) +md.qmu.statistics.method[0]['nbins'] = 20 + +md.qmu.statistics.addmethod() +md.qmu.statistics.method[1]['name'] = 'MeanVariance' +md.qmu.statistics.method[1]['fields'] = ['Sealevel', 'BslrIce'] +md.qmu.statistics.method[1]['steps'] = np.arange(1, 10 + 1).reshape(1, -1) + +md.qmu.statistics.addmethod() +md.qmu.statistics.method[2]['name'] = 'SampleSeries' +md.qmu.statistics.method[2]['fields'] = ['Sealevel', 'BslrIce'] +md.qmu.statistics.method[2]['steps'] = np.arange(1, 10 + 1).reshape(1, -1) +md.qmu.statistics.method[2]['indices'] = np.asarray(locations).reshape(1, -1) +#}}} + +# Run transient Dakota solution +mds = solve(md, 'Transient') + +# Run without statistics computations +md.qmu.statistics.method[0]['name'] = 'None' +md = solve(md, 'Transient') + +# Compare statistics with our own here +# +# TODO: SealevelSamples should be an attribute of mds.results.StatisticsSolution[-1] (see test2006.m) +# +svalues = mds.results.StatisticsSolution[0].SealevelSamples # all values at locations + +dvalues = np.zeros((md.qmu.method.params.samples, len(locations))) + +for i in range(md.qmu.method.params.samples): + dvalues[i, :] = md.results.dakota.modelresults[i].TransientSolution[-1].Sealevel[locations].flatten() + +samplesnorm = np.linalg.norm(dvalues - svalues, 'fro') + +# Fields and tolerances to track changes +field_names = ['Samples Norm'] +field_tolerances = [1e-13] +field_values = [samplesnorm] Index: ../trunk-jpl/test/NightlyRun/runme.m =================================================================== --- ../trunk-jpl/test/NightlyRun/runme.m (revision 25687) +++ ../trunk-jpl/test/NightlyRun/runme.m (revision 25688) @@ -106,7 +106,7 @@ % }}} %Process Ids according to benchmarks{{{ if strcmpi(benchmark,'nightly'), - test_ids=intersect(test_ids,[1:999]); + test_ids=intersect(test_ids,union([1:999],[2001:2500])); elseif strcmpi(benchmark,'validation'), test_ids=intersect(test_ids,[1001:1999]); elseif strcmpi(benchmark,'ismip'), @@ -218,9 +218,7 @@ %our output is in the correct order (n,1) or (1,1), so we do not need to transpose again archive_cell=archread(['../Archives/' archive_name '.arch'],[archive_name '_field' num2str(k)]); archive=archive_cell{1}; - error_diff=full(max(abs(archive(:)-field(:)))/(max(abs(archive(:)))+eps)); - - %disp test result + error_diff=full(max(abs(archive(:)-field(:)))/(max(abs(archive(:)))+eps)); %disp test result if (error_diff>tolerance | isnan(error_diff)); disp(sprintf(['ERROR difference: %-7.2g > %7.2g test id: %i test name: %s field: %s'],... 
error_diff,tolerance,id,id_string,fieldname)); Index: ../trunk-jpl/test/NightlyRun =================================================================== --- ../trunk-jpl/test/NightlyRun (revision 25687) +++ ../trunk-jpl/test/NightlyRun (revision 25688) Property changes on: ../trunk-jpl/test/NightlyRun ___________________________________________________________________ Modified: svn:ignore ## -17,3 +17,6 ## run run.old run_matlab +test218.qmu.in +test218.qmu.out +test218.qmu.err Index: ../trunk-jpl/test =================================================================== --- ../trunk-jpl/test (revision 25687) +++ ../trunk-jpl/test (revision 25688) Property changes on: ../trunk-jpl/test ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /issm/branches/trunk-larour-SLPS2020/test:r25330-25625 Index: ../trunk-jpl/src/m/classes/results.py =================================================================== --- ../trunk-jpl/src/m/classes/results.py (revision 25687) +++ ../trunk-jpl/src/m/classes/results.py (revision 25688) @@ -2,19 +2,18 @@ class results(object): - ''' - RESULTS class definition + """RESULTS class definition - Usage: - results = results() + Usage: + results = results() - TODO: - - Modify output so that it matches that of + TODO: + - Modify output so that it matches that of - disp(md.results.<>) + disp(md.results.<>) - where <> is one of the values from solve.m - ''' + where <> is one of the values from solve.m + """ def __init__(self, *args): # {{{ pass Index: ../trunk-jpl/src/m/classes/qmustatistics.py =================================================================== --- ../trunk-jpl/src/m/classes/qmustatistics.py (revision 25687) +++ ../trunk-jpl/src/m/classes/qmustatistics.py (revision 25688) @@ -25,16 +25,15 @@ # name : name of method, one of 'None', 'Histogram', 'SampleSeries', or 'MeanVariance' # fields : fields for the statistics being requested, ex: 'Sealevel', 'BslrIce', 'BsrlHydro' # steps : time steps at which each field statistic is computed, ex: [1, 2, 5, 20] or [range(1:100)] - # nbins : number of bins for 'Histgogram' statistics + # nbins : number of bins for 'Histogram' statistics # indices : vertex indices at which to retrieve samples nargs = len(args) - if nargs == 0: # Create a default object self.setdefaultparameters() elif nargs == 1: - # NOTE: The following has not been tested. Remove this note when it has. + # NOTE: The following has not been tested. 
Remove this note when it has inputstruct = args[0] list1 = properties('qmustatistics') list2 = fieldnames(inputstruct) @@ -47,18 +46,14 @@ #}}} def __repr__(self): # {{{ - s = '{}\n'.format('qmustatistics: post-Dakota run processing of QMU statistics:') - + s = 'qmustatistics: post-Dakota run processing of QMU statistics:\n' if self.method[0]['name'] == 'None': return s - s += '{}\n'.format(fielddisplay(self, 'nfiles_per_directory', 'number of files per output directory')) s += '{}\n'.format(fielddisplay(self, 'ndirectories', 'number of output directories; should be < numcpus')) - for i in range(len(self.method)): s += '{}\n'.format(' method # {}'.format(i)) s += '{}\n'.format(self.method[i]) - return s #}}} @@ -66,6 +61,7 @@ self.method[0]['name'] = 'None' self.nfiles_per_directory = 5 # Number of files per output directory self.ndirectories = 50 # Number of output directories; should be < numcpus + return self #}}} @staticmethod @@ -114,18 +110,17 @@ WriteData(fid, prefix, 'name', 'md.qmu.statistics.nfiles_per_directory', 'data', self.nfiles_per_directory, 'format', 'Integer') WriteData(fid, prefix, 'name', 'md.qmu.statistics.ndirectories', 'data', self.ndirectories, 'format', 'Integer') WriteData(fid, prefix, 'name', 'md.qmu.statistics.numstatistics', 'data', len(self.method), 'format', 'Integer') - for i in range(len(self.method)): - m = self.method[i] - WriteData(fid, prefix, 'name', 'md.qmu.statistics.method[{}][\'name\']'.format(i), 'data', m['name'], 'Format', 'String') - WriteData(fid, prefix, 'data', m['fields'], 'name', 'md.qmu.statistics.method[{}][\'fields\']'.format(i), 'format', 'StringArray') - WriteData(fid, prefix, 'data', m['steps'], 'name', 'md.qmu.statistics.method[{}][\'steps\']'.format(i), 'format', 'IntMat', 'mattype', 3) - + for i in range(1, len(self.method) + 1): + m = self.method[i - 1] + WriteData(fid, prefix, 'name', 'md.qmu.statistics.method({}).name'.format(i), 'data', m['name'], 'format', 'String') + WriteData(fid, prefix, 'data', m['fields'], 'name', 'md.qmu.statistics.method({}).fields'.format(i), 'format', 'StringArray') + WriteData(fid, prefix, 'data', m['steps'], 'name', 'md.qmu.statistics.method({}).steps'.format(i), 'format', 'IntMat', 'mattype', 3) if m['name'] == 'Histogram': - WriteData(fid, prefix, 'name', 'md.qmu.statistics.method[{}][\'nbins\']'.format(i), 'data', m['nbins'], 'format', 'Integer') + WriteData(fid, prefix, 'name', 'md.qmu.statistics.method({}).nbins'.format(i), 'data', m['nbins'], 'format', 'Integer') elif m['name'] == 'MeanVariance': pass # do nothing elif m['name'] == 'SampleSeries': - WriteData(fid, prefix, 'data', m['indices'], 'name', 'md.qmu.statistics.method[{}][\'indices\']'.format(i), 'format', 'IntMat', 'mattype', 3) + WriteData(fid, prefix, 'data', m['indices'], 'name', 'md.qmu.statistics.method({}).indices'.format(i), 'format', 'IntMat', 'mattype', 3) else: raise Exception('qmustatistics marshall error message: unknown type ''{}'' for qmu.statistics.method[{}]'.format(m['name'], i)) #}}} @@ -133,3 +128,15 @@ def extrude(self, md): #{{{ return self #}}} + + def addmethod(self, *args): #{{{ + """ADDMETHOD - Add new, empty method or passed dict to self.method + """ + nargs = len(args) + if nargs == 0: + self.method.append({}) + elif nargs == 1: + self.method.append(args[0]) + else: + raise Exception('Number of args should be 0 (appends empty dict to methods member) or 1 (appends passed dict to methods member)') + #}}} Index: ../trunk-jpl/src/m/solve/parseresultsfromdisk.py 
=================================================================== --- ../trunk-jpl/src/m/solve/parseresultsfromdisk.py (revision 25687) +++ ../trunk-jpl/src/m/solve/parseresultsfromdisk.py (revision 25688) @@ -1,17 +1,81 @@ +from collections import OrderedDict import struct + import numpy as np -from collections import OrderedDict + import results as resultsclass -def parseresultsfromdisk(md, filename, iosplit): +def parseresultsfromdisk(md, filename, iosplit): #{{{ if iosplit: saveres = parseresultsfromdiskiosplit(md, filename) else: saveres = parseresultsfromdiskioserial(md, filename) + #saveres = parseresultsfromdiskioserialsequential(md, filename) return saveres +#}}} +def parseresultsfromdiskiosplit(md, filename): # {{{ + #Open file + try: + fid = open(filename, 'rb') + except IOError as e: + raise IOError("parseresultsfromdisk error message: could not open '{}' for binary reading.".format(filename)) + saveres = [] + + #if we have done split I / O, ie, we have results that are fragmented across patches, + #do a first pass, and figure out the structure of results + loadres = ReadDataDimensions(fid) + while loadres: + + #Get time and step + if loadres['step'] > len(saveres): + for i in range(len(saveres), loadres['step'] - 1): + saveres.append(None) + saveres.append(resultsclass.results()) + setattr(saveres[loadres['step'] - 1], 'step', loadres['step']) + setattr(saveres[loadres['step'] - 1], 'time', loadres['time']) + + #Add result + setattr(saveres[loadres['step'] - 1], loadres['fieldname'], float('NaN')) + + #read next result + loadres = ReadDataDimensions(fid) + + #do a second pass, and figure out the size of the patches + fid.seek(0) #rewind + loadres = ReadDataDimensions(fid) + while loadres: + + #read next result + loadres = ReadDataDimensions(fid) + + #third pass, this time to read the real information + fid.seek(0) #rewind + loadres = ReadData(fid, md) + while loadres: + + #Get time and step + if loadres['step'] > len(saveres): + for i in range(len(saveres), loadres['step'] - 1): + saveres.append(None) + saveres.append(saveresclass.saveres()) + setattr(saveres[loadres['step'] - 1], 'step', loadres['step']) + setattr(saveres[loadres['step'] - 1], 'time', loadres['time']) + + #Add result + setattr(saveres[loadres['step'] - 1], loadres['fieldname'], loadres['field']) + + #read next result + loadres = ReadData(fid, md) + + #close file + fid.close() + + return saveres +# }}} + def parseresultsfromdiskioserial(md, filename): # {{{ #Open file try: @@ -72,78 +136,14 @@ fid.close() return saveres - # }}} +# }}} - -def parseresultsfromdiskiosplit(md, filename): # {{{ - #Open file - try: - fid = open(filename, 'rb') - except IOError as e: - raise IOError("loadresultsfromdisk error message: could not open '{}' for binary reading.".format(filename)) - - saveres = [] - - #if we have done split I / O, ie, we have results that are fragmented across patches, - #do a first pass, and figure out the structure of results - loadres = ReadDataDimensions(fid) - while loadres: - - #Get time and step - if loadres['step'] > len(saveres): - for i in range(len(saveres), loadres['step'] - 1): - saveres.append(None) - saveres.append(resultsclass.results()) - setattr(saveres[loadres['step'] - 1], 'step', loadres['step']) - setattr(saveres[loadres['step'] - 1], 'time', loadres['time']) - - #Add result - setattr(saveres[loadres['step'] - 1], loadres['fieldname'], float('NaN')) - - #read next result - loadres = ReadDataDimensions(fid) - - #do a second pass, and figure out the size of the patches - fid.seek(0) 
#rewind - loadres = ReadDataDimensions(fid) - while loadres: - - #read next result - loadres = ReadDataDimensions(fid) - - #third pass, this time to read the real information - fid.seek(0) #rewind - loadres = ReadData(fid, md) - while loadres: - - #Get time and step - if loadres['step'] > len(saveres): - for i in range(len(saveres), loadres['step'] - 1): - saveres.append(None) - saveres.append(saveresclass.saveres()) - setattr(saveres[loadres['step'] - 1], 'step', loadres['step']) - setattr(saveres[loadres['step'] - 1], 'time', loadres['time']) - - #Add result - setattr(saveres[loadres['step'] - 1], loadres['fieldname'], loadres['field']) - - #read next result - loadres = ReadData(fid, md) - - #close file - fid.close() - - return saveres - # }}} - - def ReadData(fid, md): # {{{ - ''' - READDATA - + """READDATA - Usage: - field = ReadData(fid, md) - ''' + Usage: + field = ReadData(fid, md) + """ #read field try: @@ -290,18 +290,17 @@ saveres = None return saveres - # }}} +# }}} def ReadDataDimensions(fid): # {{{ - """ - READDATADIMENSIONS - read data dimensions, step and time, but not the data itself. + """READDATADIMENSIONS - read data dimensions, step and time, but not the data itself. - Usage: - field = ReadDataDimensions(fid) + Usage: + field = ReadDataDimensions(fid) """ - #read field + # Read field try: length = struct.unpack('i', fid.read(struct.calcsize('i')))[0] fieldname = struct.unpack('{}s'.format(length), fid.read(length))[0][:-1] @@ -309,7 +308,7 @@ step = struct.unpack('i', fid.read(struct.calcsize('i')))[0] datatype = struct.unpack('i', fid.read(struct.calcsize('i')))[0] M = struct.unpack('i', fid.read(struct.calcsize('i')))[0] - N = 1 #default + N = 1 # default if datatype == 1: fid.seek(M * 8, 1) elif datatype == 2: @@ -332,4 +331,4 @@ saveres = None return saveres - # }}} +# }}} Index: ../trunk-jpl/src/m/solve/parseresultsfromdisk.m =================================================================== --- ../trunk-jpl/src/m/solve/parseresultsfromdisk.m (revision 25687) +++ ../trunk-jpl/src/m/solve/parseresultsfromdisk.m (revision 25688) @@ -1,4 +1,4 @@ -function results=parseresultsfromdisk(md,filename,iosplit) +function results=parseresultsfromdisk(md,filename,iosplit) % {{{ if iosplit, results=parseresultsfromdiskiosplit(md,filename); @@ -6,53 +6,67 @@ results=parseresultsfromdiskioserial(md,filename); %results=parseresultsfromdiskioserialsequential(md,filename); end +% }}} +function results=parseresultsfromdiskiosplit(md,filename) % {{{ -function results=parseresultsfromdiskioserialsequential(md,filename) % {{{ - %Open file fid=fopen(filename,'rb'); if(fid==-1), error(['loadresultsfromdisk error message: could not open ',filename,' for binary reading']); end +results=struct(); -%first pass to figure out the steps we have: -steps=[]; -while 1, - result = ReadDataDimensions(fid); - if isempty(result), - break; +%if we have done split I/O, ie, we have results that are fragmented across patches, +%do a first pass, and figure out the structure of results +result=ReadDataDimensions(fid); +while ~isempty(result), + + %Get time and step + results(result.step).step=result.step; + if result.time~=-9999, + results(result.step).time=result.time; end - if result.step~=-9999, - steps=[steps result.step]; - end + + %Add result + results(result.step).(result.fieldname)=NaN; + + %read next result + result=ReadDataDimensions(fid); end -steps=unique(steps); +%do a second pass, and figure out the size of the patches +fseek(fid,0,-1); %rewind +result=ReadDataDimensions(fid); +while 
~isempty(result), + %read next result + result=ReadDataDimensions(fid); +end -%create structure: -results=struct('step',num2cell(steps)); +%third pass, this time to read the real information +fseek(fid,0,-1); %rewind +result=ReadData(fid,md); +while ~isempty(result), -%second pass to fill the steps we have: -fseek(fid,0,-1); %rewind -while 1, - result = ReadData(fid,md); - if isempty(result), - break; + %Get time and step + results(result.step).step=result.step; + if result.time~=-9999, + results(result.step).time=result.time; end - if result.step==-9999, - result.step=1; - result.time=0; + + %Add result + results(result.step).(result.fieldname)=result.field; + + %read next result + try, + result=ReadData(fid,md); + catch me, + disp('WARNING: file corrupted, results partial recovery'); + result=[]; end - %find where we are putting this step: - ind=find(steps==result.step); - if isempty(ind), - error('could not find a step in our pre-structure! Something is very off!'); - end - %plug: - results(ind).time=result.time; - results(ind).(result.fieldname)=result.field; end + +%close file fclose(fid); % }}} function results=parseresultsfromdiskioserial(md,filename) % {{{ @@ -70,7 +84,7 @@ %read next result try, - result = ReadData(fid,md); + result = ReadData(fid,md); catch me, disp('WARNING: file corrupted, trying partial recovery'); continue; @@ -91,7 +105,7 @@ end fclose(fid); -%Now, process all results and find how many steps we have +%Now, process all results and find out how many steps we have numresults = numel(allresults); allsteps = zeros(numresults,1); for i=1:numresults @@ -104,18 +118,18 @@ results=struct(); for i=1:numresults result = allresults{i}; - index = 1; - if result.step ~= -9999 + if(result.step ~= -9999) index = find(result.step == allsteps); + results(index).step=result.step; end - - if(result.step~=-9999) results(index).step=result.step; end - if(result.time~=-9999) results(index).time=result.time; end + if(result.time~=-9999) + results(index).time=result.time; + end results(index).(result.fieldname)=result.field; end % }}} -function results=parseresultsfromdiskiosplit(md,filename) % {{{ +function results=parseresultsfromdiskioserialsequential(md,filename) % {{{ %Open file fid=fopen(filename,'rb'); @@ -122,61 +136,47 @@ if(fid==-1), error(['loadresultsfromdisk error message: could not open ',filename,' for binary reading']); end -results=struct(); -%if we have done split I/O, ie, we have results that are fragmented across patches, -%do a first pass, and figure out the structure of results -result=ReadDataDimensions(fid); -while ~isempty(result), - - %Get time and step - results(result.step).step=result.step; - if result.time~=-9999, - results(result.step).time=result.time; +%first pass to figure out the steps we have: +steps=[]; +while 1, + result = ReadDataDimensions(fid); + if isempty(result), + break; end + if result.step~=-9999, + steps=[steps result.step]; + end +end - %Add result - results(result.step).(result.fieldname)=NaN; +steps=unique(steps); - %read next result - result=ReadDataDimensions(fid); -end +%create structure: +results=struct('step',num2cell(steps)); -%do a second pass, and figure out the size of the patches +%second pass to fill the steps we have: fseek(fid,0,-1); %rewind -result=ReadDataDimensions(fid); -while ~isempty(result), - %read next result - result=ReadDataDimensions(fid); -end - -%third pass, this time to read the real information -fseek(fid,0,-1); %rewind -result=ReadData(fid,md); -while ~isempty(result), - - %Get time and step - 
results(result.step).step=result.step; - if result.time~=-9999, - results(result.step).time=result.time; +while 1, + result = ReadData(fid,md); + if isempty(result), + break; end - - %Add result - results(result.step).(result.fieldname)=result.field; - - %read next result - try, - result=ReadData(fid,md); - catch me, - disp('WARNING: file corrupted, results partial recovery'); - result=[]; + if result.step==-9999, + result.step=1; + result.time=0; end + %find where we are putting this step: + ind=find(steps==result.step); + if isempty(ind), + error('could not find a step in our pre-structure! Something is very off!'); + end + %plug: + results(ind).time=result.time; + results(ind).(result.fieldname)=result.field; end - -%close file fclose(fid); - % }}} +%}}} function result=ReadData(fid,md) % {{{ %read field @@ -311,11 +311,12 @@ field=temp_field; end + if time~=-9999, + time=time/yts; + end + result.fieldname=fieldname; result.time=time; - if result.time~=-9999, - result.time=time/yts; - end result.step=step; result.field=field; end Index: ../trunk-jpl/src/m/solve/loadresultsfromdisk.py =================================================================== --- ../trunk-jpl/src/m/solve/loadresultsfromdisk.py (revision 25687) +++ ../trunk-jpl/src/m/solve/loadresultsfromdisk.py (revision 25688) @@ -1,5 +1,7 @@ import os +import numpy as np # Remove: used temporarily for debugging + from helpers import fieldnames from parseresultsfromdisk import parseresultsfromdisk from postqmu import postqmu @@ -39,8 +41,8 @@ structure = parseresultsfromdisk(md, filename, not md.settings.io_gather) if not structure: raise RuntimeError("No result found in binary file '{}'. Check for solution crash.".format(filename)) - if not structure[0].SolutionType: - if structure[-1].SolutionType: + if not hasattr(structure[0], 'SolutionType'): + if hasattr(structure[-1], 'SolutionType'): structure[0].SolutionType = structure[-1].SolutionType else: print('Cannot find a solution type in the results! Ascribing one: \'NoneSolution\'.') @@ -64,7 +66,7 @@ setattr(getattr(md.results, structure[0].SolutionType)[0], 'outlog', '') if getattr(md.results, structure[0].SolutionType)[0].errlog: - print("loadresultsfromcluster info message: error during solution. Check your errlog and outlog model fields.") + print("loadresultsfromdisk info message: error during solution. Check your errlog and outlog model fields.") # If only one solution, extract it from list for user friendliness if len(structure) == 1 and structure[0].SolutionType != 'TransientSolution': Index: ../trunk-jpl/src/m/solve/loadresultsfromdisk.m =================================================================== --- ../trunk-jpl/src/m/solve/loadresultsfromdisk.m (revision 25687) +++ ../trunk-jpl/src/m/solve/loadresultsfromdisk.m (revision 25688) @@ -61,7 +61,7 @@ end if ~isempty(md.results.(structure(1).SolutionType)(1).errlog), - disp(['loadresultsfromcluster info message: error during solution. Check your errlog and outlog model fields']); + disp(['loadresultsfromdisk info message: error during solution. Check your errlog and outlog model fields']); end %post processes qmu results if necessary Index: ../trunk-jpl/src =================================================================== --- ../trunk-jpl/src (revision 25687) +++ ../trunk-jpl/src (revision 25688) Property changes on: ../trunk-jpl/src ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /issm/branches/trunk-larour-SLPS2020/src:r25330-25625
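For reference, the archive check that runme.m applies to each tracked field above
(error_diff = max|archive - field| / (max|archive| + eps), flagged when it exceeds the
tolerance or is NaN) can be sketched in NumPy roughly as follows. This is only an
illustrative sketch of that comparison; the function name, array values and tolerance
below are hypothetical and are not part of this patch.

import numpy as np

def compare_field(archive, field, tolerance):
    # Relative difference as computed in runme.m:
    # max(abs(archive(:) - field(:))) / (max(abs(archive(:))) + eps)
    archive = np.asarray(archive, dtype=float).ravel()
    field = np.asarray(field, dtype=float).ravel()
    error_diff = np.max(np.abs(archive - field)) / (np.max(np.abs(archive)) + np.finfo(float).eps)
    # A test fails when the difference exceeds the tolerance or is NaN
    failed = bool(error_diff > tolerance or np.isnan(error_diff))
    return error_diff, failed

# Example: a field such as test2006's 'Samples Norm' is tracked with tolerance 1e-13
error_diff, failed = compare_field([1.0, 2.0, 3.0], [1.0, 2.0, 3.0], 1e-13)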