Index: ../trunk-jpl/src/m/modules/Scotch.py
===================================================================
--- ../trunk-jpl/src/m/modules/Scotch.py (revision 25010)
+++ ../trunk-jpl/src/m/modules/Scotch.py (revision 25011)
@@ -1,7 +1,7 @@
 from Scotch_python import Scotch_python
 
 
-def Scotch(* varargin):
+def Scotch(*varargin):
     '''SCOTCH - Scotch partitioner
 
     Usage:
@@ -8,6 +8,6 @@
         maptab = Scotch(adjmat, vertlb, vertwt, edgewt, archtyp, archpar, Scotch - specific parameters)
     '''
     # Call mex module
-    maptab = Scotch_python(* varargin)
+    maptab = Scotch_python(*varargin)
 
     return maptab
Index: ../trunk-jpl/src/m/boundaryconditions/love_numbers.py
===================================================================
--- ../trunk-jpl/src/m/boundaryconditions/love_numbers.py (revision 25010)
+++ ../trunk-jpl/src/m/boundaryconditions/love_numbers.py (revision 25011)
@@ -1,7 +1,7 @@
 import numpy as np
 
 
-def love_numbers(value, * varargin):
+def love_numbers(value, *varargin):
     '''LOVE_NUMBERS: provide love numbers (value 'h', 'k', 'l', 'gamma' and 'lambda' retrieved from:
     http://www.srosat.com/iag-jsg/loveNb.php
 
     Usage: series = love_numbers(value)
Index: ../trunk-jpl/src/m/plot/export_gl.py
===================================================================
--- ../trunk-jpl/src/m/plot/export_gl.py (revision 25010)
+++ ../trunk-jpl/src/m/plot/export_gl.py (revision 25011)
@@ -6,7 +6,7 @@
 from writejsfile import writejsfile
 
 
-def export_gl(md, * varargin):
+def export_gl(md, *varargin):
     class ResultObj(object):
         def __getattr__(self, attr):
             return self.__dict__.get(attr)
Index: ../trunk-jpl/src/m/classes/qmu/response_function.py
===================================================================
--- ../trunk-jpl/src/m/classes/qmu/response_function.py (revision 25010)
+++ ../trunk-jpl/src/m/classes/qmu/response_function.py (revision 25011)
@@ -1,7 +1,7 @@
 import numpy as np
+from fielddisplay import fielddisplay
 from MatlabFuncs import *
-from fielddisplay import fielddisplay
 from pairoptions import pairoptions
 from partition_npart import *
 from rlev_write import *
@@ -102,17 +102,17 @@
         return [rf]  # Always return a list, so we have something akin to a MATLAB single row matrix
 
-    def __repr__(self):  #{{{
+    def __repr__(rf):  #{{{
         # display the object
         string = 'class "response_function" object = \n'
-        string = "%s\n%s" % (string, fielddisplay(self, 'descriptor', 'name tag'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'respl', 'response levels'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'probl', 'probability levels'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'rell', 'reliability levels'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'grell', 'general reliability levels'))
+        string = "%s\n%s" % (string, fielddisplay(rf, 'descriptor', 'name tag'))
+        string = "%s\n%s" % (string, fielddisplay(rf, 'respl', 'response levels'))
+        string = "%s\n%s" % (string, fielddisplay(rf, 'probl', 'probability levels'))
+        string = "%s\n%s" % (string, fielddisplay(rf, 'rell', 'reliability levels'))
+        string = "%s\n%s" % (string, fielddisplay(rf, 'grell', 'general reliability levels'))
 
-        if self.partition != []:
-            string = "%s\n%s" % (string, fielddisplay(self, 'partition', 'partition'))
+        if rf.partition != []:
+            string = "%s\n%s" % (string, fielddisplay(rf, 'partition', 'partition vector defining where the response will be computed'))
 
         return string
     #}}}
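Note: the __repr__ signature change above is legal Python, if unconventional; the instance is passed as the first positional argument regardless of its name, so rf simply plays the role of self here. A minimal sketch (hypothetical class, not from the patch):

    class Point(object):
        def __repr__(pt):  # 'pt' receives the instance, exactly as 'self' would
            return 'Point()'

    print(repr(Point()))  # prints: Point()
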
Index: ../trunk-jpl/src/m/coordsystems/gmtmask.py
===================================================================
--- ../trunk-jpl/src/m/coordsystems/gmtmask.py (revision 25010)
+++ ../trunk-jpl/src/m/coordsystems/gmtmask.py (revision 25011)
@@ -5,7 +5,7 @@
 import subprocess
 
 
-def gmtmask(lat, long, * varargin):
+def gmtmask(lat, long, *varargin):
     '''GMTMASK - figure out which lat, long points are on the ocean
 
     Usage:
Index: ../trunk-jpl/src/m/qmu/qmupart2npart.py
===================================================================
--- ../trunk-jpl/src/m/qmu/qmupart2npart.py (nonexistent)
+++ ../trunk-jpl/src/m/qmu/qmupart2npart.py (revision 25011)
@@ -0,0 +1,10 @@
+import numpy as np
+
+
+def qmupart2npart(vector):
+    # Vector is full of -1 (no partition) and values from 0 to npart - 1. We
+    # need to identify npart.
+
+    npart = vector.max() + 1
+
+    return npart
Index: ../trunk-jpl/src/m/partition/partitioner.py
===================================================================
--- ../trunk-jpl/src/m/partition/partitioner.py (revision 25010)
+++ ../trunk-jpl/src/m/partition/partitioner.py (revision 25011)
@@ -8,26 +8,26 @@
 from mesh2d import *
 
 
-def partitioner(md, * varargin):
-    help = '''
-PARTITIONER - partition mesh
+def partitioner(md, *varargin):
+    '''
+    PARTITIONER - partition mesh
 
-    List of options to partitioner:
+    List of options to partitioner:
 
-    package: 'chaco', 'metis'
-    npart: number of partitions.
-    weighting: 'on' or 'off': default off
-    section: 1 by defaults(1 = bisection, 2 = quadrisection, 3 = octasection)
-    recomputeadjacency: 'on' by default (set to 'off' to compute existing one)
-    type: 'node' or 'element' partition vector (default to 'node')
-    Output: md.qmu.partition recover the partition vector
+    package: 'chaco', 'metis'
+    npart: number of partitions.
+    weighting: 'on' or 'off': default off
+    section: 1 by default (1 = bisection, 2 = quadrisection, 3 = octasection)
+    recomputeadjacency: 'on' by default (set to 'off' to reuse the existing adjacency matrix)
+    type: 'node' or 'element' partition vector (default to 'node')
+    Output: partitionvector: the partition vector
 
-    Usage:
-    md = partitioner(md, 'package', 'chaco', 'npart', 100, 'weighting', 'on')
+    Usage:
+    partitionvector = partitioner(md, 'package', 'chaco', 'npart', 100, 'weighting', 'on')
     '''
 
    #get options:
-    options = pairoptions(* varargin)
+    options = pairoptions(*varargin)
 
    #get options:
    section = options.getfieldvalue('section', 1)
@@ -121,12 +121,5 @@
    part = part.reshape(-1, 1)
 
-    if vectortype == 'element':
-        md.qmu.epartition = part
-        if np.size(md.qmu.vpartition) == 0 or (np.size(md.qmu.vpartition) == 1 and np.isnan(md.qmu.vpartition)):
-            md.qmu.vpartition = np.zeros((md.mesh.numberofvertices, 1))
-    else:
-        md.qmu.vpartition = part
-        if np.size(md.qmu.epartition) == 0 or (np.size(md.qmu.epartition) == 1 and np.isnan(md.qmu.epartition)):
-            md.qmu.epartition = np.zeros((md.mesh.numberofelements, 1))
-    return md
+    # Output
+    return part
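Note: partitioner() now returns the partition vector instead of writing it into md.qmu, and the new qmupart2npart() recovers the number of partitions from that vector. A minimal usage sketch (assumes an already-meshed model md; the trailing - 1 is an assumption that the packages number areas from 1, since the qmu convention is 0-indexed with -1 meaning "no partition"):

    from partitioner import partitioner
    from qmupart2npart import qmupart2npart

    # Vertex-based partition vector, shaped (md.mesh.numberofvertices, 1)
    partition = partitioner(md, 'package', 'chaco', 'npart', 100, 'weighting', 'on') - 1

    # Number of partition areas, recovered from the vector itself
    npart = qmupart2npart(partition)  # == partition.max() + 1
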
Index: ../trunk-jpl/src/m/classes/slr.py
===================================================================
--- ../trunk-jpl/src/m/classes/slr.py (revision 25010)
+++ ../trunk-jpl/src/m/classes/slr.py (revision 25011)
@@ -1,8 +1,9 @@
+import numpy as np
+
+from checkfield import checkfield
 from fielddisplay import fielddisplay
 from MatlabFuncs import *
 from model import *
-import numpy as np
-from checkfield import checkfield
 from WriteData import WriteData
@@ -38,10 +39,10 @@
         self.geodetic_run_frequency = 1  #how many time steps we skip before we run the geodetic part of the solver during transient
         self.geodetic = 0  #compute geodetic SLR? (in addition to steric?)
         self.degacc = 0
-        self.loop_increment = 0
         self.horiz = 0
         self.Ngia = float('NaN')
         self.Ugia = float('NaN')
+        self.planetradius = planetradius('earth')
         self.requested_outputs = []
         self.transitions = []
@@ -71,7 +72,6 @@
         string = "%s\n%s" % (string, fielddisplay(self, 'hydro_rate', 'rate of hydrological expansion [mm / yr]'))
         string = "%s\n%s" % (string, fielddisplay(self, 'Ngia', 'rate of viscous (GIA) geoid expansion (in mm / yr)'))
         string = "%s\n%s" % (string, fielddisplay(self, 'Ugia', 'rate of viscous (GIA) bedrock uplift (in mm / yr)'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'loop_increment', 'vector assembly (in the convolution) framentation'))
         string = "%s\n%s" % (string, fielddisplay(self, 'geodetic', 'compute geodetic SLR? (in addition to steric?) default 0'))
         string = "%s\n%s" % (string, fielddisplay(self, 'geodetic_run_frequency', 'how many time steps we skip before we run SLR solver during transient (default: 1)'))
         string = "%s\n%s" % (string, fielddisplay(self, 'rigid', 'rigid earth graviational potential perturbation'))
@@ -90,7 +90,6 @@
         self.abstol = float('NaN')  #1 mm of sea level rise
         #maximum of non - linear iterations.
         self.maxiter = 5
-        self.loop_increment = 200
         #computational flags:
         self.geodetic = 0
         self.rigid = 1
@@ -120,6 +119,8 @@
         self.transitions = []
         #horizontal displacement? (not by default)
         self.horiz = 0
+        #earth radius
+        self.planetradius = planetradius('earth')
 
         return self
     #}}}
@@ -149,7 +150,6 @@
         md = checkfield(md, 'fieldname', 'slr.hydro_rate', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
         md = checkfield(md, 'fieldname', 'slr.degacc', 'size', [1, 1], '>=', 1e-10)
         md = checkfield(md, 'fieldname', 'slr.requested_outputs', 'stringrow', 1)
-        md = checkfield(md, 'fieldname', 'slr.loop_increment', 'NaN', 1, 'Inf', 1, '>=', 1)
         md = checkfield(md, 'fieldname', 'slr.horiz', 'NaN', 1, 'Inf', 1, 'values', [0, 1])
         md = checkfield(md, 'fieldname', 'slr.Ngia', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
         md = checkfield(md, 'fieldname', 'slr.Ugia', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
@@ -203,9 +203,9 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'Ugia', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1e-3 / md.constants.yts)
         WriteData(fid, prefix, 'object', self, 'fieldname', 'degacc', 'format', 'Double')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'transitions', 'format', 'MatArray')
-        WriteData(fid, prefix, 'object', self, 'fieldname', 'loop_increment', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'horiz', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'geodetic', 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'planetradius', 'format', 'Double')
 
         #process requested outputs
         outputs = self.requested_outputs
Index: ../trunk-jpl/src/m/classes/qmu.py
===================================================================
--- ../trunk-jpl/src/m/classes/qmu.py (revision 25010)
+++ ../trunk-jpl/src/m/classes/qmu.py (revision 25011)
@@ -1,13 +1,13 @@
 import numpy as np
-from MatlabFuncs import *
-from IssmConfig import *
-from project3d import project3d
+
+from checkfield import checkfield
 from collections import OrderedDict
+from dakota_method import *
 from fielddisplay import fielddisplay
-from checkfield import checkfield
+from helpers import *
+from IssmConfig import *
+from MatlabFuncs import *
 from WriteData import WriteData
-from helpers import *
-from dakota_method import *
 
 
 class qmu(object):
@@ -26,10 +26,13 @@
         self.method = OrderedDict()
         self.params = OrderedStruct()
         self.results = OrderedDict()
-        self.numberofpartitions = 0
         self.numberofresponses = 0
         self.variabledescriptors = []
+        self.variablepartitions = []
+        self.variablepartitions_npart = []
         self.responsedescriptors = []
+        self.responsepartitions = []
+        self.responsepartitions_npart = []
         self.mass_flux_profile_directory = float('NaN')
         self.mass_flux_profiles = float('NaN')
         self.mass_flux_segments = []
@@ -108,9 +111,8 @@
                 size = np.shape(getattr(result, fname))
                 s += " %-*s: [%ix%i] '%s'\n" % (maxlen + 1, fname, a, b, type(getattr(result, fname)))
 
-        s += "%s\n" % fielddisplay(self, 'vpartition', 'user provided mesh partitioning (vertex based)')
-        s += "%s\n" % fielddisplay(self, 'epartition', 'user provided mesh partitioning (element based)')
-        s += "%s\n" % fielddisplay(self, 'numberofpartitions', 'number of partitions for semi - discrete qmu')
+        s += "%s\n" % fielddisplay(self, 'variablepartitions', 'partition vectors (one per variable) for distributed variables')
+        s += "%s\n" % fielddisplay(self, 'variablepartitions_npart', 'number of partitions for each variable')
         s += "%s\n" % fielddisplay(self, 'variabledescriptors', '')
         s += "%s\n" % fielddisplay(self, 'responsedescriptors', '')
         s += "%s\n" % fielddisplay(self, 'method', 'array of dakota_method class')
@@ -123,8 +125,6 @@
         return s
     # }}}
 
     def extrude(self, md):  # {{{
-        self.vpartition = project3d(md, 'vector', np.transpose(self.vpartition), 'type', 'node')
-        self.epartition = project3d(md, 'vector', np.transpose(self.epartition), 'type', 'element')
         return self
     #}}}
 
     def setdefaultparameters(self):  # {{{
@@ -154,25 +154,11 @@
         if np.mod(md.cluster.np - 1, self.params.processors_per_evaluation):
             md.checkmessage('in parallel library mode, the requirement is for md.cluster.np = md.qmu.params.processors_per_evaluation * number_of_slaves, where number_of_slaves will automatically be determined by Dakota. Modify md.cluster.np accordingly')
 
-        if np.size(md.qmu.vpartition) > 0:
-            if np.size(md.qmu.vpartition, 0) != md.mesh.numberofvertices:
-                md.checkmessage("user supplied vertex partition for qmu analysis should have size (md.mesh.numberofvertices x 1)")
-            if not min(md.qmu.vpartition.flatten()) == 0:
-                md.checkmessage("vertex partition vector not indexed from 0 on")
-            if max(md.qmu.vpartition.flatten()) >= md.qmu.numberofpartitions:
-                md.checkmessage("for qmu analysis, vertex partitioning vector cannot go over npart, number of partition areas")
+        # Go through variables and check for consistency
+        fv = fieldnames(self.variables)
+        for i in range(len(fv)):
+            getattr(self.variables, fv[i]).checkconsistency(md, solution, analyses)
 
-        if np.size(md.qmu.epartition) > 0:
-            if np.size(md.qmu.epartition, 0) != md.mesh.numberofelements:
-                md.checkmessage("user supplied element partition for qmu analysis should have size (md.mesh.numberofelements x 1)")
-            if not min(md.qmu.epartition.flatten()) == 0:
-                md.checkmessage("elememtn partition vector not indexed from 0 on")
-            if max(md.qmu.epartition.flatten()) >= md.qmu.numberofpartitions:
-                md.checkmessage("for qmu analysis, element partitioning vector cannot go over npart, number of partition areas")
-
-        if np.size(md.qmu.vpartition) == 0 or np.any(np.isnan(md.qmu.vpartition)) or np.size(md.qmu.epartition) == 0 or np.any(np.isnan(md.qmu.epartition)):
-            md.checkmessage("for qmu analysis, both an element and partitioning vectors need to be supplied with no nan values! One can be defaulted to all zeros.")
-
         return md
     # }}}
@@ -181,10 +167,13 @@
         if not self.isdakota:
             WriteData(fid, prefix, 'data', False, 'name', 'md.qmu.mass_flux_segments_present', 'format', 'Boolean')
             return
-        WriteData(fid, prefix, 'object', self, 'fieldname', 'numberofpartitions', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'numberofresponses', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'variabledescriptors', 'format', 'StringArray')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'variablepartitions', 'format', 'MatArray')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'variablepartitions_npart', 'format', 'IntMat', 'mattype', 3)
         WriteData(fid, prefix, 'object', self, 'fieldname', 'responsedescriptors', 'format', 'StringArray')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'responsepartitions', 'format', 'MatArray')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'responsepartitions_npart', 'format', 'IntMat', 'mattype', 3)
         if not isempty(self.mass_flux_segments):
             WriteData(fid, prefix, 'data', self.mass_flux_segments, 'name', 'md.qmu.mass_flux_segments', 'format', 'MatArray')
             flag = True
Index: ../trunk-jpl/src/m/qmu/setupdesign/QmuSetupVariables.py
===================================================================
--- ../trunk-jpl/src/m/qmu/setupdesign/QmuSetupVariables.py (revision 25010)
+++ ../trunk-jpl/src/m/qmu/setupdesign/QmuSetupVariables.py (revision 25011)
@@ -1,7 +1,8 @@
-from MatlabFuncs import *
 from copy import deepcopy
 from helpers import *
+from MatlabFuncs import *
 from normal_uncertain import *
+from qmupart2npart import *
 from uniform_uncertain import *
@@ -16,17 +17,25 @@
     #ok, key off according to type of descriptor:
     if strncmp(descriptor, 'scaled_', 7):
-        #we have a scaled variable, expand it over the partition.
+        #we have a scaled variable, expand it over the partition. First recover the partition.
+        partition = variables.partition
+        #figure out number of partitions
+        npart = qmupart2npart(partition)
+
         if isinstance(variables, uniform_uncertain):
-            if ((type(variables.lower) in [list, np.ndarray] and len(variables.lower) > md.qmu.numberofpartitions) or (type(variables.upper) in [list, np.ndarray] and len(variables.upper) > md.qmu.numberofpartitions)):
-                raise RuntimeError('QmuSetupDesign error message: upper and lower should be either a scalar or a "npart" length vector')
+            nlower = len(variables.lower)
+            nupper = len(variables.upper)
+            if nlower != npart or nupper != npart:
+                raise RuntimeError('QmuSetupVariables error message: upper and lower fields should be same size as the number of partitions')
         elif isinstance(variables, normal_uncertain):
-            if type(variables.stddev) in [list, np.ndarray] and len(variables.stddev) > md.qmu.numberofpartitions:
-                raise RuntimeError('QmuSetupDesign error message: stddev should be either a scalar or a "npart" length vector')
+            nstddev = len(variables.stddev)
+            nmean = len(variables.mean)
+            if nstddev != npart or nmean != npart:
+                raise RuntimeError('QmuSetupVariables error message: stddev and mean fields should be same size as the number of partitions')
 
         #ok, dealing with semi-discrete distributed variable. Distribute according to how many
         #partitions we want
-        for j in range(md.qmu.numberofpartitions):
+        for j in range(npart):
             dvar.append(deepcopy(variables))
 
             # text parsing in dakota requires literal "'identifier'" not just "identifier"
@@ -33,23 +42,11 @@
             dvar[-1].descriptor = "'" + str(variables.descriptor) + '_' + str(j + 1) + "'"
 
             if isinstance(variables, uniform_uncertain):
-                if type(variables.lower) in [list, np.ndarray]:
-                    dvar[-1].lower = variables.lower[j]
-                else:
-                    dvar[-1].lower = variables.lower
-                if type(variables.upper) in [list, np.ndarray]:
-                    dvar[-1].upper = variables.upper[j]
-                else:
-                    dvar[-1].upper = variables.upper
+                dvar[-1].lower = variables.lower[j]
+                dvar[-1].upper = variables.upper[j]
             elif isinstance(variables, normal_uncertain):
-                if type(variables.stddev) in [list, np.ndarray]:
-                    dvar[-1].stddev = variables.stddev[j]
-                else:
-                    dvar[-1].stddev = variables.stddev
-                if type(variables.mean) in [list, np.ndarray]:
-                    dvar[-1].mean = variables.mean[j]
-                else:
-                    dvar[-1].mean = variables.mean
+                dvar[-1].stddev = variables.stddev[j]
+                dvar[-1].mean = variables.mean[j]
     else:
         dvar.append(deepcopy(variables))
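Note: QmuSetupVariables now enforces an exact contract: for a scaled variable, lower/upper (or mean/stddev) must have exactly npart entries, where npart is derived from the variable's own partition vector; scalars and shorter vectors, previously tolerated, now raise RuntimeError. A self-contained sketch of the sizing rule (toy numbers, not from the patch):

    import numpy as np
    from qmupart2npart import qmupart2npart

    partition = np.array([0, 0, 1, 1, 2, -1])  # 6 vertices, 3 areas, one unassigned
    npart = qmupart2npart(partition)           # -> 3

    mean = np.ones(npart)          # one value per partition area
    stddev = 0.1 * np.ones(npart)  # must also be exactly npart long
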
Index: ../trunk-jpl/src/m/qmu/setupdesign/QmuSetupResponses.py
===================================================================
--- ../trunk-jpl/src/m/qmu/setupdesign/QmuSetupResponses.py (revision 25010)
+++ ../trunk-jpl/src/m/qmu/setupdesign/QmuSetupResponses.py (revision 25011)
@@ -1,5 +1,6 @@
+from copy import deepcopy
 from MatlabFuncs import *
-from copy import deepcopy
+from qmupart2npart import *
 
 
 def QmuSetupResponses(md, dresp, responses):
@@ -7,15 +8,18 @@
     #get descriptor
     descriptor = responses.descriptor
 
-    #decide whether this is a distributed response, which will drive whether we expand it into npart values,
-    #or if we just carry it forward as is.
+    # Decide whether this is a distributed response, which will drive whether
+    # we expand it into npart values, or if we just carry it forward as is.
 
     #ok, key off according to type of descriptor:
     if strncmp(descriptor, 'scaled_', 7):
         #we have a scaled response, expand it over the partition.
-        #ok, dealing with semi - discrete distributed response. Distribute according to how many
-        #partitions we want
-        for j in range(md.qmu.numberofpartitions):
+
+        # Ok, dealing with semi-discrete distributed response. Distribute
+        # according to how many partitions we want.
+        npart = qmupart2npart(responses.partition)
+
+        for j in range(npart):
            dresp.append(deepcopy(responses))
            dresp[-1].descriptor = str(responses.descriptor) + '_' + str(j + 1)
     else:
Index: ../trunk-jpl/src/m/qmu/preqmu.py
===================================================================
--- ../trunk-jpl/src/m/qmu/preqmu.py (revision 25010)
+++ ../trunk-jpl/src/m/qmu/preqmu.py (revision 25011)
@@ -1,10 +1,12 @@
 import os
-from MatlabFuncs import *
+
+from dakota_in_data import *
+from expandresponses import *
 from expandvariables import *
-from expandresponses import *
 from helpers import *
-from dakota_in_data import *
+from MatlabFuncs import *
 from process_qmu_response_data import *
+from qmupart2npart import *
@@ -105,10 +107,42 @@
             responsedescriptors.append(fieldresponses[j].descriptor)
     #}}}
 
+    # Build a list of variable partitions
+    variablepartitions = []
+    variablepartitions_npart = []
+    variable_fieldnames = fieldnames(md.qmu.variables)
+    for i in range(len(variable_fieldnames)):
+        field_name = variable_fieldnames[i]
+        fieldvariable = getattr(md.qmu.variables, field_name)
+        if fieldvariable.isscaled():
+            variablepartitions.append(fieldvariable.partition)
+            variablepartitions_npart.append(qmupart2npart(fieldvariable.partition))
+        else:
+            variablepartitions.append([])
+            variablepartitions_npart.append(0)
+
+    # Build a list of response partitions
+    responsepartitions = []
+    responsepartitions_npart = []
+    response_fieldnames = fieldnames(md.qmu.responses)
+    for i in range(len(response_fieldnames)):
+        field_name = response_fieldnames[i]
+        fieldresponse = getattr(md.qmu.responses, field_name)
+        if fieldresponse.isscaled():
+            responsepartitions.append(fieldresponse.partition)
+            responsepartitions_npart.append(qmupart2npart(fieldresponse.partition))
+        else:
+            responsepartitions.append([])
+            responsepartitions_npart.append(0)
+
     # register the fields that will be needed by the Qmu model.
     md.qmu.numberofresponses = numresponses
     md.qmu.variabledescriptors = variabledescriptors
+    md.qmu.variablepartitions = variablepartitions
+    md.qmu.variablepartitions_npart = variablepartitions_npart
     md.qmu.responsedescriptors = responsedescriptors
+    md.qmu.responsepartitions = responsepartitions
+    md.qmu.responsepartitions_npart = responsepartitions_npart
 
     # now, we have to provide all the info necessary for the solutions to compute the
     # responses. For ex, if mass_flux is a response, we need a profile of points.
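Note: after preqmu runs, md.qmu carries, for every variable and response, either its partition vector and npart, or [] and 0 when the quantity is not a distributed ("scaled_") one. A toy illustration of the resulting bookkeeping, assuming two variables of which only the first is scaled:

    import numpy as np

    partition = np.array([0, 1, 1, 0, -1])  # toy 0-indexed vertex partition
    variablepartitions = [partition, []]    # a scaled variable, then a plain one
    variablepartitions_npart = [2, 0]       # 0 = variable is not distributed

These are the same 'MatArray' / 'IntMat' pairs marshalled by qmu.py above.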