source:
issm/oecreview/Archive/24684-25833/ISSM-25010-25011.diff
Last change on this file was 25834, checked in 4 years ago

File size: 24.2 KB
-
../trunk-jpl/src/m/modules/Scotch.py
 from Scotch_python import Scotch_python


-def Scotch(*
+def Scotch(*varargin):
     '''SCOTCH - Scotch partitioner

     Usage:
         maptab = Scotch(adjmat, vertlb, vertwt, edgewt, archtyp, archpar, Scotch - specific parameters)
     '''
     # Call mex module
-    maptab = Scotch_python(*
+    maptab = Scotch_python(*varargin)

     return maptab
-
../trunk-jpl/src/m/boundaryconditions/love_numbers.py
 import numpy as np


-def love_numbers(value, *
+def love_numbers(value, *varargin):
     '''LOVE_NUMBERS: provide love numbers (value 'h', 'k', 'l', 'gamma' and 'lambda'
     retrieved from: http://www.srosat.com/iag-jsg/loveNb.php
     Usage: series = love_numbers(value)
-
../trunk-jpl/src/m/plot/export_gl.py
 from writejsfile import writejsfile


-def export_gl(md, *
+def export_gl(md, *varargin):
     class ResultObj(object):
         def __getattr__(self, attr):
             return self.__dict__.get(attr)
-
../trunk-jpl/src/m/classes/qmu/response_function.py
 import numpy as np

+from fielddisplay import fielddisplay
 from MatlabFuncs import *
-from fielddisplay import fielddisplay
 from pairoptions import pairoptions
 from partition_npart import *
 from rlev_write import *
…
         return [rf]  # Always return a list, so we have something akin to a MATLAB single row matrix

-    def __repr__(self):  #{{{
+    def __repr__(rf):  #{{{
         # display the object
         string = 'class "response_function" object = \n'
-        string = "%s\n%s" % (string, fielddisplay(self, 'descriptor', 'name tag'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'respl', 'response levels'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'probl', 'probability levels'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'rell', 'reliability levels'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'grell', 'general reliability levels'))
+        string = "%s\n%s" % (string, fielddisplay(rf, 'descriptor', 'name tag'))
+        string = "%s\n%s" % (string, fielddisplay(rf, 'respl', 'response levels'))
+        string = "%s\n%s" % (string, fielddisplay(rf, 'probl', 'probability levels'))
+        string = "%s\n%s" % (string, fielddisplay(rf, 'rell', 'reliability levels'))
+        string = "%s\n%s" % (string, fielddisplay(rf, 'grell', 'general reliability levels'))

-        if self.partition != []:
-            string = "%s\n%s" % (string, fielddisplay(self, 'partition', 'partition'))
+        if rf.partition != []:
+            string = "%s\n%s" % (string, fielddisplay(rf, 'partition', 'partition vector defining where the response will be computed'))

         return string
     #}}}
-
../trunk-jpl/src/m/coordsystems/gmtmask.py
 import subprocess


-def gmtmask(lat, long, *
+def gmtmask(lat, long, *varargin):
     '''GMTMASK - figure out which lat, long points are on the ocean

     Usage:
-
../trunk-jpl/src/m/qmu/qmupart2npart.py
+import numpy as np
+
+
+def qmupart2npart(vector):
+    # Vector is full of -1 (no partition) and 0 to npart. We need to identify
+    # npart.
+
+    npart = vector.max() + 1
+
+    return npart
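
A quick sketch of what the new helper does (illustrative values, not part of the changeset): entries of -1 mark unpartitioned nodes, partition ids run from 0 upward, so the partition count is simply the maximum plus one.

    import numpy as np

    from qmupart2npart import qmupart2npart

    # Hypothetical partition vector over six vertices: -1 = no partition,
    # ids 0..2 = three partition areas.
    partition = np.array([-1, 0, 0, 1, 2, 2])
    npart = qmupart2npart(partition)  # partition.max() + 1
    print(npart)  # 3

-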
../trunk-jpl/src/m/partition/partitioner.py
 from mesh2d import *


-def partitioner(md, *
-    help ='''
+def partitioner(md, *varargin):
+    '''
     PARTITIONER - partition mesh

     List of options to partitioner:

     package: 'chaco', 'metis'
     npart: number of partitions.
     weighting: 'on' or 'off': default off
     section: 1 by defaults(1 = bisection, 2 = quadrisection, 3 = octasection)
     recomputeadjacency: 'on' by default (set to 'off' to compute existing one)
     type: 'node' or 'element' partition vector (default to 'node')
-    Output: md.qmu.partition recover the partition vector
+    Output: partitionvector: the partition vector

     Usage:
-    md = partitioner(md, 'package', 'chaco', 'npart', 100, 'weighting', 'on')
+    partitionvector = partitioner(md, 'package', 'chaco', 'npart', 100, 'weighting', 'on')
     '''

     #get options:
-    options = pairoptions(*
+    options = pairoptions(*varargin)

     #get options:
     section = options.getfieldvalue('section', 1)
…
     part = part.reshape(-1, 1)

-    if vectortype == 'element':
-        md.qmu.epartition = part
-        if np.size(md.qmu.vpartition) == 0 or (np.size(md.qmu.vpartition) == 1 and np.isnan(md.qmu.vpartition)):
-            md.qmu.vpartition = np.zeros((md.mesh.numberofvertices, 1))
-    else:
-        md.qmu.vpartition = part
-        if np.size(md.qmu.epartition) == 0 or (np.size(md.qmu.epartition) == 1 and np.isnan(md.qmu.epartition)):
-            md.qmu.epartition = np.zeros((md.mesh.numberofelements, 1))
-    return md
+    # Output
+    return part
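
Under the new interface the caller owns the partition vector instead of finding it written into md.qmu. A minimal usage sketch, assuming an existing model 'md' with a mesh; option values are illustrative:

    from partitioner import partitioner
    from qmupart2npart import qmupart2npart

    # Options mirror the docstring above.
    partitionvector = partitioner(md, 'package', 'chaco', 'npart', 100, 'weighting', 'on')

    # qmu partitions are indexed from 0, so a 1-based partitioner output would
    # need shifting first (assumption; depends on the backend package):
    npart = qmupart2npart(partitionvector - 1)

-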
../trunk-jpl/src/m/classes/slr.py
+import numpy as np
+
+from checkfield import checkfield
 from fielddisplay import fielddisplay
 from MatlabFuncs import *
 from model import *
-import numpy as np
-from checkfield import checkfield
 from WriteData import WriteData
…
         self.geodetic_run_frequency = 1  #how many time steps we skip before we run the geodetic part of the solver during transient
         self.geodetic = 0  #compute geodetic SLR? (in addition to steric?)
         self.degacc = 0
-        self.loop_increment = 0
         self.horiz = 0
         self.Ngia = float('NaN')
         self.Ugia = float('NaN')
+        self.planetradius = planetradius('earth')
         self.requested_outputs = []
         self.transitions = []
…
         string = "%s\n%s" % (string, fielddisplay(self, 'hydro_rate', 'rate of hydrological expansion [mm / yr]'))
         string = "%s\n%s" % (string, fielddisplay(self, 'Ngia', 'rate of viscous (GIA) geoid expansion (in mm / yr)'))
         string = "%s\n%s" % (string, fielddisplay(self, 'Ugia', 'rate of viscous (GIA) bedrock uplift (in mm / yr)'))
-        string = "%s\n%s" % (string, fielddisplay(self, 'loop_increment', 'vector assembly (in the convolution) framentation'))
         string = "%s\n%s" % (string, fielddisplay(self, 'geodetic', 'compute geodetic SLR? (in addition to steric?) default 0'))
         string = "%s\n%s" % (string, fielddisplay(self, 'geodetic_run_frequency', 'how many time steps we skip before we run SLR solver during transient (default: 1)'))
         string = "%s\n%s" % (string, fielddisplay(self, 'rigid', 'rigid earth graviational potential perturbation'))
…
         self.abstol = float('NaN')  #1 mm of sea level rise
         #maximum of non - linear iterations.
         self.maxiter = 5
-        self.loop_increment = 200
         #computational flags:
         self.geodetic = 0
         self.rigid = 1
…
         self.transitions = []
         #horizontal displacement? (not by default)
         self.horiz = 0
+        #earth area
+        self.planetradius = planetradius('earth')

         return self
     #}}}
…
         md = checkfield(md, 'fieldname', 'slr.hydro_rate', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
         md = checkfield(md, 'fieldname', 'slr.degacc', 'size', [1, 1], '>=', 1e-10)
         md = checkfield(md, 'fieldname', 'slr.requested_outputs', 'stringrow', 1)
-        md = checkfield(md, 'fieldname', 'slr.loop_increment', 'NaN', 1, 'Inf', 1, '>=', 1)
         md = checkfield(md, 'fieldname', 'slr.horiz', 'NaN', 1, 'Inf', 1, 'values', [0, 1])
         md = checkfield(md, 'fieldname', 'slr.Ngia', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
         md = checkfield(md, 'fieldname', 'slr.Ugia', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
…
         WriteData(fid, prefix, 'object', self, 'fieldname', 'Ugia', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1e-3 / md.constants.yts)
         WriteData(fid, prefix, 'object', self, 'fieldname', 'degacc', 'format', 'Double')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'transitions', 'format', 'MatArray')
-        WriteData(fid, prefix, 'object', self, 'fieldname', 'loop_increment', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'horiz', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'geodetic', 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'planetradius', 'format', 'Double')

         #process requested outputs
         outputs = self.requested_outputs
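
The new planetradius field replaces the dropped loop_increment. The diff confirms the helper's call signature (planetradius('earth')); the body below is a hedged sketch only, and the radius constant is an assumption (mean Earth radius), not taken from the changeset:

    # Sketch of the planetradius helper called by slr.__init__ above; only
    # 'earth' is shown, and the constant is an assumption.
    def planetradius(planet):
        if planet == 'earth':
            return 6.371012e6  # mean Earth radius [m] (assumed value)
        raise RuntimeError("planet type '%s' not supported yet" % planet)

-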
../trunk-jpl/src/m/classes/qmu.py
 import numpy as np
-from MatlabFuncs import *
-from IssmConfig import *
-from project3d import project3d
+
+from checkfield import checkfield
 from collections import OrderedDict
+from dakota_method import *
 from fielddisplay import fielddisplay
-from checkfield import checkfield
+from helpers import *
+from IssmConfig import *
+from MatlabFuncs import *
 from WriteData import WriteData
-from helpers import *
-from dakota_method import *


 class qmu(object):
…
         self.method = OrderedDict()
         self.params = OrderedStruct()
         self.results = OrderedDict()
-        self.numberofpartitions = 0
         self.numberofresponses = 0
         self.variabledescriptors = []
+        self.variablepartitions = []
+        self.variablepartitions_npart = []
         self.responsedescriptors = []
+        self.responsepartitions = []
+        self.responsepartitions_npart = []
         self.mass_flux_profile_directory = float('NaN')
         self.mass_flux_profiles = float('NaN')
         self.mass_flux_segments = []
…
             size = np.shape(getattr(result, fname))
             s += "    %-*s: [%ix%i]    '%s'\n" % (maxlen + 1, fname, a, b, type(getattr(result, fname)))

-        s += "%s\n" % fielddisplay(self, 'vpartition', 'user provided mesh partitioning (vertex based)')
-        s += "%s\n" % fielddisplay(self, 'epartition', 'user provided mesh partitioning (element based)')
-        s += "%s\n" % fielddisplay(self, 'numberofpartitions', 'number of partitions for semi - discrete qmu')
+        s += "%s\n" % fielddisplay(self, 'variablepartitions', '')
+        s += "%s\n" % fielddisplay(self, 'variablepartitions_npart', '')
         s += "%s\n" % fielddisplay(self, 'variabledescriptors', '')
         s += "%s\n" % fielddisplay(self, 'responsedescriptors', '')
         s += "%s\n" % fielddisplay(self, 'method', 'array of dakota_method class')
…
         return s
     # }}}
     def extrude(self, md):  # {{{
-        self.vpartition = project3d(md, 'vector', np.transpose(self.vpartition), 'type', 'node')
-        self.epartition = project3d(md, 'vector', np.transpose(self.epartition), 'type', 'element')
         return self
     #}}}
     def setdefaultparameters(self):  # {{{
…
         if np.mod(md.cluster.np - 1, self.params.processors_per_evaluation):
             md.checkmessage('in parallel library mode, the requirement is for md.cluster.np = md.qmu.params.processors_per_evaluation * number_of_slaves, where number_of_slaves will automatically be determined by Dakota. Modify md.cluster.np accordingly')

-        if np.size(md.qmu.vpartition) > 0:
-            if np.size(md.qmu.vpartition, 0) != md.mesh.numberofvertices:
-                md.checkmessage("user supplied vertex partition for qmu analysis should have size (md.mesh.numberofvertices x 1)")
-            if not min(md.qmu.vpartition.flatten()) == 0:
-                md.checkmessage("vertex partition vector not indexed from 0 on")
-            if max(md.qmu.vpartition.flatten()) >= md.qmu.numberofpartitions:
-                md.checkmessage("for qmu analysis, vertex partitioning vector cannot go over npart, number of partition areas")
+        # Go through variables and check for consistency
+        fv = fieldnames(self.variables)
+        for i in range(len(fv)):
+            getattr(self.variables, fv[i]).checkconsistency(md, solution, analyses)

-        if np.size(md.qmu.epartition) > 0:
-            if np.size(md.qmu.epartition, 0) != md.mesh.numberofelements:
-                md.checkmessage("user supplied element partition for qmu analysis should have size (md.mesh.numberofelements x 1)")
-            if not min(md.qmu.epartition.flatten()) == 0:
-                md.checkmessage("elememtn partition vector not indexed from 0 on")
-            if max(md.qmu.epartition.flatten()) >= md.qmu.numberofpartitions:
-                md.checkmessage("for qmu analysis, element partitioning vector cannot go over npart, number of partition areas")
-
-        if np.size(md.qmu.vpartition) == 0 or np.any(np.isnan(md.qmu.vpartition)) or np.size(md.qmu.epartition) == 0 or np.any(np.isnan(md.qmu.epartition)):
-            md.checkmessage("for qmu analysis, both an element and partitioning vectors need to be supplied with no nan values! One can be defaulted to all zeros.")
-
         return md
     # }}}
     def marshall(self, prefix, md, fid):  # {{{
…
         if not self.isdakota:
             WriteData(fid, prefix, 'data', False, 'name', 'md.qmu.mass_flux_segments_present', 'format', 'Boolean')
             return
-        WriteData(fid, prefix, 'object', self, 'fieldname', 'numberofpartitions', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'numberofresponses', 'format', 'Integer')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'variabledescriptors', 'format', 'StringArray')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'variablepartitions', 'format', 'MatArray')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'variablepartitions_npart', 'format', 'IntMat', 'mattype', 3)
         WriteData(fid, prefix, 'object', self, 'fieldname', 'responsedescriptors', 'format', 'StringArray')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'responsepartitions', 'format', 'MatArray')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'responsepartitions_npart', 'format', 'IntMat', 'mattype', 3)
         if not isempty(self.mass_flux_segments):
             WriteData(fid, prefix, 'data', self.mass_flux_segments, 'name', 'md.qmu.mass_flux_segments', 'format', 'MatArray')
             flag = True
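
With partitions now attached to individual variables rather than a single global md.qmu.numberofpartitions, a setup might look like the following sketch. Attribute-style assignment is used to avoid assuming the uncertain-class constructor signature; 'md', the field name 'friction', and the descriptor are illustrative, not from the changeset:

    import numpy as np

    from normal_uncertain import normal_uncertain

    # Hypothetical scaled variable over npart = 3 partitions; 'partition' is a
    # 0-based vector with one entry per vertex, as counted by qmupart2npart.
    partition = np.array([0, 0, 1, 2, 2, -1])

    nu = normal_uncertain()  # assumes a bare constructor; actual API may differ
    nu.descriptor = 'scaled_FrictionCoefficient'
    nu.mean = np.ones(3)
    nu.stddev = 0.05 * np.ones(3)
    nu.partition = partition

    md.qmu.variables.friction = nu
    # checkconsistency() now delegates to each variable, and marshall() writes
    # variablepartitions / variablepartitions_npart from these fields.

-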
../trunk-jpl/src/m/qmu/setupdesign/QmuSetupVariables.py
-from MatlabFuncs import *
 from copy import deepcopy
 from helpers import *
+from MatlabFuncs import *
 from normal_uncertain import *
+from qmupart2npart import *
 from uniform_uncertain import *
…
     #ok, key off according to type of descriptor:
     if strncmp(descriptor, 'scaled_', 7):
-        #we have a scaled variable, expand it over the partition.
+        #we have a scaled variable, expand it over the partition. First recover the partition.
+        partition = variables.partition
+        #figure out number of partitions
+        npart = qmupart2npart(partition)
+
         if isinstance(variables, uniform_uncertain):
-            if ((type(variables.lower) in [list, np.ndarray] and len(variables.lower) > md.qmu.numberofpartitions) or (type(variables.upper) in [list, np.ndarray] and len(variables.upper) > md.qmu.numberofpartitions)):
-                raise RuntimeError('QmuSetupDesign error message: upper and lower should be either a scalar or a "npart" length vector')
+            nlower = len(variables.lower)
+            nupper = len(variables.upper)
+            if nlower != npart or nupper != npart:
+                raise RuntimeError('QmuSetupVariables error message: upper and lower fields should be same size as the number of partitions')
         elif isinstance(variables, normal_uncertain):
-            if type(variables.stddev) in [list, np.ndarray] and len(variables.stddev) > md.qmu.numberofpartitions:
-                raise RuntimeError('QmuSetupDesign error message: stddev should be either a scalar or a "npart" length vector')
+            nstddev = len(variables.stddev)
+            nmean = len(variables.mean)
+            if nstddev != npart or nmean != npart:
+                raise RuntimeError('QmuSetupVariables error message: stddev and mean fields should be same size as the number of partitions')

         #ok, dealing with semi-discrete distributed variable. Distribute according to how many
         #partitions we want
-        for j in range(md.qmu.numberofpartitions):
+        for j in range(npart):
             dvar.append(deepcopy(variables))

             # text parsing in dakota requires literal "'identifier'" not just "identifier"
             dvar[-1].descriptor = "'" + str(variables.descriptor) + '_' + str(j + 1) + "'"

             if isinstance(variables, uniform_uncertain):
-                if type(variables.lower) in [list, np.ndarray]:
-                    dvar[-1].lower = variables.lower[j]
-                else:
-                    dvar[-1].lower = variables.lower
-                if type(variables.upper) in [list, np.ndarray]:
-                    dvar[-1].upper = variables.upper[j]
-                else:
-                    dvar[-1].upper = variables.upper
+                dvar[-1].lower = variables.lower[j]
+                dvar[-1].upper = variables.upper[j]
             elif isinstance(variables, normal_uncertain):
-                if type(variables.stddev) in [list, np.ndarray]:
-                    dvar[-1].stddev = variables.stddev[j]
-                else:
-                    dvar[-1].stddev = variables.stddev
-                if type(variables.mean) in [list, np.ndarray]:
-                    dvar[-1].mean = variables.mean[j]
-                else:
-                    dvar[-1].mean = variables.mean
+                dvar[-1].stddev = variables.stddev[j]
+                dvar[-1].mean = variables.mean[j]
     else:
         dvar.append(deepcopy(variables))
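
The expansion loop above clones the variable once per partition. A standalone, runnable sketch of the naming and per-partition assignment it performs (the stand-in class and values are hypothetical):

    from copy import deepcopy

    import numpy as np

    class _Var(object):  # minimal stand-in for a normal_uncertain variable
        pass

    variables = _Var()
    variables.descriptor = 'scaled_FrictionCoefficient'
    variables.mean = np.ones(3)
    variables.stddev = 0.05 * np.ones(3)

    dvar = []
    for j in range(3):  # npart = 3 in this sketch
        dvar.append(deepcopy(variables))
        # Dakota wants the descriptor literally quoted, hence the added "'"
        dvar[-1].descriptor = "'" + str(variables.descriptor) + '_' + str(j + 1) + "'"
        dvar[-1].mean = variables.mean[j]
        dvar[-1].stddev = variables.stddev[j]

    print(dvar[0].descriptor)  # 'scaled_FrictionCoefficient_1' (with quotes)

-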
../trunk-jpl/src/m/qmu/setupdesign/QmuSetupResponses.py
+from copy import deepcopy
 from MatlabFuncs import *
-from copy import deepcopy
+from qmupart2npart import *


 def QmuSetupResponses(md, dresp, responses):
…
     #get descriptor
     descriptor = responses.descriptor

-    # decide whether this is a distributed response, which will drive whether we expand it into npart values,
-    # or if we just carry it forward as is.
+    # Decide whether this is a distributed response, which will drive whether
+    # we expand it into npart values, or if we just carry it forward as is.

     #ok, key off according to type of descriptor:
     if strncmp(descriptor, 'scaled_', 7):
         #we have a scaled response, expand it over the partition.
-        #ok, dealing with semi - discrete distributed response. Distribute according to how many
-        #partitions we want
-        for j in range(md.qmu.numberofpartitions):
+
+        # Ok, dealing with semi-discrete distributed response. Distribute
+        # according to how many partitions we want.
+        npart = qmupart2npart(responses.partition)
+
+        for j in range(npart):
             dresp.append(deepcopy(responses))
             dresp[-1].descriptor = str(responses.descriptor) + '_' + str(j + 1)
     else:
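
Responses follow the same pattern, with npart read off the response's own partition vector. For example (descriptor and values hypothetical):

    import numpy as np

    from qmupart2npart import qmupart2npart

    partition = np.array([0, 0, 1, 2, 2, -1])  # hypothetical response partition
    npart = qmupart2npart(partition)           # 3
    descriptors = ['scaled_MaxVel_%d' % (j + 1) for j in range(npart)]
    # ['scaled_MaxVel_1', 'scaled_MaxVel_2', 'scaled_MaxVel_3']

-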
../trunk-jpl/src/m/qmu/preqmu.py
 import os
-from MatlabFuncs import *
+
+from dakota_in_data import *
+from expandresponses import *
 from expandvariables import *
-from expandresponses import *
 from helpers import *
-from dakota_in_data import *
+from MatlabFuncs import *
 from process_qmu_response_data import *
+from qmupart2npart import *


 def preqmu(md, options):
…
             responsedescriptors.append(fieldresponses[j].descriptor)
     #}}}

+    # Build a list of variable partitions
+    variablepartitions = []
+    variablepartitions_npart = []
+    variable_fieldnames = fieldnames(md.qmu.variables)
+    for i in range(len(variable_fieldnames)):
+        field_name = variable_fieldnames[i]
+        fieldvariable = getattr(md.qmu.variables, field_name)
+        if fieldvariable.isscaled():
+            variablepartitions.append(fieldvariable.partition)
+            variablepartitions_npart.append(qmupart2npart(fieldvariable.partition))
+        else:
+            variablepartitions.append([])
+            variablepartitions_npart.append(0)
+
+    # Build a list of response partitions
+    responsepartitions = []
+    responsepartitions_npart = []
+    response_fieldnames = fieldnames(md.qmu.responses)
+    for i in range(len(response_fieldnames)):
+        field_name = response_fieldnames[i]
+        fieldresponse = getattr(md.qmu.variables, field_name)
+        if fieldresponse.isscaled():
+            responsepartitions.append(fieldresponse.partition)
+            responsepartitions_npart.append(qmupart2npart(fieldresponse.partition))
+        else:
+            responsepartitions.append([])
+            responsepartitions_npart.append(0)
+
     # register the fields that will be needed by the Qmu model.
     md.qmu.numberofresponses = numresponses
     md.qmu.variabledescriptors = variabledescriptors
+    md.qmu.variablepartitions = variablepartitions
+    md.qmu.variablepartitions_npart = variablepartitions_npart
     md.qmu.responsedescriptors = responsedescriptors
+    md.qmu.responsepartitions = responsepartitions
+    md.qmu.responsepartitions_npart = responsepartitions_npart

     # now, we have to provide all the info necessary for the solutions to compute the
     # responses. For ex, if mass_flux is a response, we need a profile of points.
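
The net effect of the two bookkeeping loops above, sketched for a hypothetical setup with one scaled variable over three partitions and one unscaled response (values illustrative):

    import numpy as np

    # Scaled fields contribute their partition vector and its npart;
    # unscaled fields contribute an empty placeholder and 0.
    variablepartitions = [np.array([0, 0, 1, 2, 2, -1])]
    variablepartitions_npart = [3]
    responsepartitions = [[]]
    responsepartitions_npart = [0]
    # qmu.marshall() then writes these as 'MatArray' / 'IntMat' fields.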