source: issm/oecreview/Archive/24684-25833/ISSM-25010-25011.diff

Last change on this file was 25834, checked in by Mathieu Morlighem, 4 years ago

CHG: added 24684-25833

File size: 24.2 KB
  • ../trunk-jpl/src/m/modules/Scotch.py

     
    11from Scotch_python import Scotch_python
    22
    33
    4 def Scotch(* varargin):
     4def Scotch(*varargin):
    55    '''SCOTCH - Scotch partitioner
    66
    77   Usage:
     
    88      maptab = Scotch(adjmat, vertlb, vertwt, edgewt, archtyp, archpar, Scotch - specific parameters)
    99'''
    1010    # Call mex module
    11     maptab = Scotch_python(* varargin)
     11    maptab = Scotch_python(*varargin)
    1212
    1313    return maptab
  • ../trunk-jpl/src/m/boundaryconditions/love_numbers.py

     
    11import numpy as np
    22
    33
    4 def love_numbers(value, * varargin):
     4def love_numbers(value, *varargin):
    55    '''LOVE_NUMBERS: provide love numbers (value 'h', 'k', 'l', 'gamma' and 'lambda'
    66             retrieved from: http://www.srosat.com/iag-jsg/loveNb.php
    77    Usage:   series = love_numbers(value)
  • ../trunk-jpl/src/m/plot/export_gl.py

     
    66from writejsfile import writejsfile
    77
    88
    9 def export_gl(md, * varargin):
     9def export_gl(md, *varargin):
    1010    class ResultObj(object):
    1111        def __getattr__(self, attr):
    1212            return self.__dict__.get(attr)
  • ../trunk-jpl/src/m/classes/qmu/response_function.py

     
    11import numpy as np
    22
     3from fielddisplay import fielddisplay
    34from MatlabFuncs import *
    4 from fielddisplay import fielddisplay
    55from pairoptions import pairoptions
    66from partition_npart import *
    77from rlev_write import *
     
    102102
    103103        return [rf] # Always return a list, so we have something akin to a MATLAB single row matrix
    104104
    105     def __repr__(self): #{{{
     105    def __repr__(rf): #{{{
    106106        # display the object
    107107        string = 'class "response_function" object = \n'
    108         string = "%s\n%s" % (string, fielddisplay(self, 'descriptor', 'name tag'))
    109         string = "%s\n%s" % (string, fielddisplay(self, 'respl', 'response levels'))
    110         string = "%s\n%s" % (string, fielddisplay(self, 'probl', 'probability levels'))
    111         string = "%s\n%s" % (string, fielddisplay(self, 'rell', 'reliability levels'))
    112         string = "%s\n%s" % (string, fielddisplay(self, 'grell', 'general reliability levels'))
     108        string = "%s\n%s" % (string, fielddisplay(rf, 'descriptor', 'name tag'))
     109        string = "%s\n%s" % (string, fielddisplay(rf, 'respl', 'response levels'))
     110        string = "%s\n%s" % (string, fielddisplay(rf, 'probl', 'probability levels'))
     111        string = "%s\n%s" % (string, fielddisplay(rf, 'rell', 'reliability levels'))
     112        string = "%s\n%s" % (string, fielddisplay(rf, 'grell', 'general reliability levels'))
    113113
    114         if self.partition != []:
    115             string = "%s\n%s" % (string, fielddisplay(self, 'partition', 'partition'))
     114        if rf.partition != []:
     115            string = "%s\n%s" % (string, fielddisplay(rf, 'partition', 'partition vector defining where the response will be computed'))
    116116
    117117        return string
    118118    #}}}
  • ../trunk-jpl/src/m/coordsystems/gmtmask.py

     
    55import subprocess
    66
    77
    8 def gmtmask(lat, long, * varargin):
     8def gmtmask(lat, long, *varargin):
    99    '''GMTMASK - figure out which lat, long points are on the ocean
    1010
    1111    Usage:
  • ../trunk-jpl/src/m/qmu/qmupart2npart.py

     
     1import numpy as np
     2
     3
     4def qmupart2npart(vector):
     5    # Vector is full of -1 (no partition) and 0 to npart. We need to identify
     6    # npart.
     7
     8    npart = vector.max() + 1
     9
     10    return npart
  • ../trunk-jpl/src/m/partition/partitioner.py

     
    88from mesh2d import *
    99
    1010
    11 def partitioner(md, * varargin):
    12     help = '''
    13 PARTITIONER - partition mesh
     11def partitioner(md, *varargin):
     12    '''
     13    PARTITIONER - partition mesh
    1414
    15    List of options to partitioner:
     15    List of options to partitioner:
    1616
    17    package: 'chaco', 'metis'
    18    npart: number of partitions.
    19    weighting: 'on' or 'off': default off
    20    section:  1 by defaults(1 = bisection, 2 = quadrisection, 3 = octasection)
    21    recomputeadjacency:  'on' by default (set to 'off' to compute existing one)
    22    type: 'node' or 'element' partition vector (default to 'node')
    23    Output: md.qmu.partition recover the partition vector
     17    package: 'chaco', 'metis'
     18    npart: number of partitions.
     19    weighting: 'on' or 'off': default off
     20    section:  1 by defaults(1 = bisection, 2 = quadrisection, 3 = octasection)
     21    recomputeadjacency:  'on' by default (set to 'off' to compute existing one)
     22    type: 'node' or 'element' partition vector (default to 'node')
     23    Output: partitionvector: the partition vector
    2424
    25    Usage:
    26       md = partitioner(md, 'package', 'chaco', 'npart', 100, 'weighting', 'on')
     25    Usage:
     26        partitionvector = partitioner(md, 'package', 'chaco', 'npart', 100, 'weighting', 'on')
    2727    '''
    2828
    2929    #get options:
    30     options = pairoptions(* varargin)
     30    options = pairoptions(*varargin)
    3131
    3232    #get options:
    3333    section = options.getfieldvalue('section', 1)
     
    121121
    122122        part = part.reshape(-1, 1)
    123123
    124     if vectortype == 'element':
    125         md.qmu.epartition = part
    126         if np.size(md.qmu.vpartition) == 0 or (np.size(md.qmu.vpartition) == 1 and np.isnan(md.qmu.vpartition)):
    127             md.qmu.vpartition = np.zeros((md.mesh.numberofvertices, 1))
    128     else:
    129         md.qmu.vpartition = part
    130         if np.size(md.qmu.epartition) == 0 or (np.size(md.qmu.epartition) == 1 and np.isnan(md.qmu.epartition)):
    131             md.qmu.epartition = np.zeros((md.mesh.numberofelements, 1))
    132     return md
     124    # Output
     125    return part
  • ../trunk-jpl/src/m/classes/slr.py

     
     1import numpy as np
     2
     3from checkfield import checkfield
    14from fielddisplay import fielddisplay
    25from MatlabFuncs import *
    36from model import *
    4 import numpy as np
    5 from checkfield import checkfield
    67from WriteData import WriteData
    78
    89
     
    3839        self.geodetic_run_frequency = 1  #how many time steps we skip before we run the geodetic part of the solver during transient
    3940        self.geodetic = 0  #compute geodetic SLR? (in addition to steric?)
    4041        self.degacc = 0
    41         self.loop_increment = 0
    4242        self.horiz = 0
    4343        self.Ngia = float('NaN')
    4444        self.Ugia = float('NaN')
     45        self.planetradius = planetradius('earth')
    4546        self.requested_outputs = []
    4647        self.transitions = []
    4748
     
    7172        string = "%s\n%s" % (string, fielddisplay(self, 'hydro_rate', 'rate of hydrological expansion [mm / yr]'))
    7273        string = "%s\n%s" % (string, fielddisplay(self, 'Ngia', 'rate of viscous (GIA) geoid expansion (in mm / yr)'))
    7374        string = "%s\n%s" % (string, fielddisplay(self, 'Ugia', 'rate of viscous (GIA) bedrock uplift (in mm / yr)'))
    74         string = "%s\n%s" % (string, fielddisplay(self, 'loop_increment', 'vector assembly (in the convolution) framentation'))
    7575        string = "%s\n%s" % (string, fielddisplay(self, 'geodetic', 'compute geodetic SLR? (in addition to steric?) default 0'))
    7676        string = "%s\n%s" % (string, fielddisplay(self, 'geodetic_run_frequency', 'how many time steps we skip before we run SLR solver during transient (default: 1)'))
    7777        string = "%s\n%s" % (string, fielddisplay(self, 'rigid', 'rigid earth graviational potential perturbation'))
     
    9090        self.abstol = float('NaN')  #1 mm of sea level rise
    9191        #maximum of non - linear iterations.
    9292        self.maxiter = 5
    93         self.loop_increment = 200
    9493        #computational flags:
    9594        self.geodetic = 0
    9695        self.rigid = 1
     
    120119        self.transitions = []
    121120        #horizontal displacement?  (not by default)
    122121        self.horiz = 0
     122        #earth area
     123        self.planetradius = planetradius('earth')
    123124
    124125        return self
    125126    #}}}
     
    149150        md = checkfield(md, 'fieldname', 'slr.hydro_rate', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
    150151        md = checkfield(md, 'fieldname', 'slr.degacc', 'size', [1, 1], '>=', 1e-10)
    151152        md = checkfield(md, 'fieldname', 'slr.requested_outputs', 'stringrow', 1)
    152         md = checkfield(md, 'fieldname', 'slr.loop_increment', 'NaN', 1, 'Inf', 1, '>=', 1)
    153153        md = checkfield(md, 'fieldname', 'slr.horiz', 'NaN', 1, 'Inf', 1, 'values', [0, 1])
    154154        md = checkfield(md, 'fieldname', 'slr.Ngia', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
    155155        md = checkfield(md, 'fieldname', 'slr.Ugia', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
     
    203203        WriteData(fid, prefix, 'object', self, 'fieldname', 'Ugia', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1e-3 / md.constants.yts)
    204204        WriteData(fid, prefix, 'object', self, 'fieldname', 'degacc', 'format', 'Double')
    205205        WriteData(fid, prefix, 'object', self, 'fieldname', 'transitions', 'format', 'MatArray')
    206         WriteData(fid, prefix, 'object', self, 'fieldname', 'loop_increment', 'format', 'Integer')
    207206        WriteData(fid, prefix, 'object', self, 'fieldname', 'horiz', 'format', 'Integer')
    208207        WriteData(fid, prefix, 'object', self, 'fieldname', 'geodetic', 'format', 'Integer')
     208        WriteData(fid, prefix, 'object', self, 'fieldname', 'planetradius', 'format', 'Double')
    209209
    210210    #process requested outputs
    211211        outputs = self.requested_outputs
  • ../trunk-jpl/src/m/classes/qmu.py

     
    11import numpy as np
    2 from MatlabFuncs import *
    3 from IssmConfig import *
    4 from project3d import project3d
     2
     3from checkfield import checkfield
    54from collections import OrderedDict
     5from dakota_method import *
    66from fielddisplay import fielddisplay
    7 from checkfield import checkfield
     7from helpers import *
     8from IssmConfig import *
     9from MatlabFuncs import *
    810from WriteData import WriteData
    9 from helpers import *
    10 from dakota_method import *
    1111
    1212
    1313class qmu(object):
     
    2626        self.method = OrderedDict()
    2727        self.params = OrderedStruct()
    2828        self.results = OrderedDict()
    29         self.numberofpartitions = 0
    3029        self.numberofresponses = 0
    3130        self.variabledescriptors = []
     31        self.variablepartitions = []
     32        self.variablepartitions_npart = []
    3233        self.responsedescriptors = []
     34        self.responsepartitions = []
     35        self.responsepartitions_npart = []
    3336        self.mass_flux_profile_directory = float('NaN')
    3437        self.mass_flux_profiles = float('NaN')
    3538        self.mass_flux_segments = []
     
    108111                size = np.shape(getattr(result, fname))
    109112                s += "            %-*s:    [%ix%i]    '%s'\n" % (maxlen + 1, fname, a, b, type(getattr(result, fname)))
    110113
    111         s += "%s\n" % fielddisplay(self, 'vpartition', 'user provided mesh partitioning (vertex based)')
    112         s += "%s\n" % fielddisplay(self, 'epartition', 'user provided mesh partitioning (element based)')
    113         s += "%s\n" % fielddisplay(self, 'numberofpartitions', 'number of partitions for semi - discrete qmu')
     114        s += "%s\n" % fielddisplay(self, 'variablepartitions', '')
     115        s += "%s\n" % fielddisplay(self, 'variablepartitions_npart', '')
    114116        s += "%s\n" % fielddisplay(self, 'variabledescriptors', '')
    115117        s += "%s\n" % fielddisplay(self, 'responsedescriptors', '')
    116118        s += "%s\n" % fielddisplay(self, 'method', 'array of dakota_method class')
     
    123125        return s
    124126    # }}}
    125127    def extrude(self, md):  # {{{
    126         self.vpartition = project3d(md, 'vector', np.transpose(self.vpartition), 'type', 'node')
    127         self.epartition = project3d(md, 'vector', np.transpose(self.epartition), 'type', 'element')
    128128        return self
    129129    #}}}
    130130    def setdefaultparameters(self):  # {{{
     
    154154            if np.mod(md.cluster.np - 1, self.params.processors_per_evaluation):
    155155                md.checkmessage('in parallel library mode, the requirement is for md.cluster.np = md.qmu.params.processors_per_evaluation * number_of_slaves, where number_of_slaves will automatically be determined by Dakota. Modify md.cluster.np accordingly')
    156156
    157         if np.size(md.qmu.vpartition) > 0:
    158             if np.size(md.qmu.vpartition, 0) != md.mesh.numberofvertices:
    159                 md.checkmessage("user supplied vertex partition for qmu analysis should have size (md.mesh.numberofvertices x 1)")
    160             if not min(md.qmu.vpartition.flatten()) == 0:
    161                 md.checkmessage("vertex partition vector not indexed from 0 on")
    162             if max(md.qmu.vpartition.flatten()) >= md.qmu.numberofpartitions:
    163                 md.checkmessage("for qmu analysis, vertex partitioning vector cannot go over npart, number of partition areas")
     157        # Go through variables and check for consistency
     158        fv = fieldnames(self.variables)
     159        for i in range(len(fv)):
     160            getattr(self.variables, fv[i]).checkconsistency(md, solution, analyses)
    164161
    165                 if np.size(md.qmu.epartition) > 0:
    166                     if np.size(md.qmu.epartition, 0) != md.mesh.numberofelements:
    167                         md.checkmessage("user supplied element partition for qmu analysis should have size (md.mesh.numberofelements x 1)")
    168                     if not min(md.qmu.epartition.flatten()) == 0:
    169                         md.checkmessage("elememtn partition vector not indexed from 0 on")
    170                     if max(md.qmu.epartition.flatten()) >= md.qmu.numberofpartitions:
    171                         md.checkmessage("for qmu analysis, element partitioning vector cannot go over npart, number of partition areas")
    172 
    173                 if np.size(md.qmu.vpartition) == 0 or np.any(np.isnan(md.qmu.vpartition)) or np.size(md.qmu.epartition) == 0 or np.any(np.isnan(md.qmu.epartition)):
    174                     md.checkmessage("for qmu analysis, both an element and partitioning vectors need to be supplied with no nan values! One can be defaulted to all zeros.")
    175 
    176162        return md
    177163    # }}}
    178164    def marshall(self, prefix, md, fid):  # {{{
     
    181167        if not self.isdakota:
    182168            WriteData(fid, prefix, 'data', False, 'name', 'md.qmu.mass_flux_segments_present', 'format', 'Boolean')
    183169            return
    184         WriteData(fid, prefix, 'object', self, 'fieldname', 'numberofpartitions', 'format', 'Integer')
    185170        WriteData(fid, prefix, 'object', self, 'fieldname', 'numberofresponses', 'format', 'Integer')
    186171        WriteData(fid, prefix, 'object', self, 'fieldname', 'variabledescriptors', 'format', 'StringArray')
     172        WriteData(fid, prefix, 'object', self, 'fieldname', 'variablepartitions', 'format', 'MatArray')
     173        WriteData(fid, prefix, 'object', self, 'fieldname', 'variablepartitions_npart', 'format', 'IntMat', 'mattype', 3)
    187174        WriteData(fid, prefix, 'object', self, 'fieldname', 'responsedescriptors', 'format', 'StringArray')
     175        WriteData(fid, prefix, 'object', self, 'fieldname', 'responsepartitions', 'format', 'MatArray')
     176        WriteData(fid, prefix, 'object', self, 'fieldname', 'responsepartitions_npart', 'format', 'IntMat', 'mattype', 3)
    188177        if not isempty(self.mass_flux_segments):
    189178            WriteData(fid, prefix, 'data', self.mass_flux_segments, 'name', 'md.qmu.mass_flux_segments', 'format', 'MatArray')
    190179            flag = True
  • ../trunk-jpl/src/m/qmu/setupdesign/QmuSetupVariables.py

     
    1 from MatlabFuncs import *
    21from copy import deepcopy
    32from helpers import *
     3from MatlabFuncs import *
    44from normal_uncertain import *
     5from qmupart2npart import *
    56from uniform_uncertain import *
    67
    78
     
    1617
    1718    #ok, key off according to type of descriptor:
    1819    if strncmp(descriptor, 'scaled_', 7):
    19         #we have a scaled variable, expand it over the partition.
     20        #we have a scaled variable, expand it over the partition. First recover the partition.
     21        partition = variables.partition
     22        #figure out number of partitions
     23        npart=qmupart2npart(partition)
     24
    2025        if isinstance(variables, uniform_uncertain):
    21             if ((type(variables.lower) in [list, np.ndarray] and len(variables.lower) > md.qmu.numberofpartitions) or (type(variables.upper) in [list, np.ndarray] and len(variables.upper) > md.qmu.numberofpartitions)):
    22                 raise RuntimeError('QmuSetupDesign error message: upper and lower should be either a scalar or a "npart" length vector')
     26            nlower=len(variables.lower)
     27            nupper=len(variables.upper)
     28            if nlower != npart or nupper != npart:
     29                raise RuntimeError('QmuSetupVariables error message: upper and lower fields should be same size as the number of partitions')
    2330        elif isinstance(variables, normal_uncertain):
    24             if type(variables.stddev) in [list, np.ndarray] and len(variables.stddev) > md.qmu.numberofpartitions:
    25                 raise RuntimeError('QmuSetupDesign error message: stddev should be either a scalar or a "npart" length vector')
     31            nstddev=len(variables.stddev)
     32            nmean=len(variables.mean)
     33            if nstddev != npart or nmean != npart:
     34                raise RuntimeError('QmuSetupVariables error message: stddev and mean fields should be same size as the number of partitions')
    2635
    2736        #ok, dealing with semi-discrete distributed variable. Distribute according to how many
    2837        #partitions we want
    29         for j in range(md.qmu.numberofpartitions):
     38        for j in range(npart):
    3039            dvar.append(deepcopy(variables))
    3140
    3241            # text parsing in dakota requires literal "'identifier'" not just "identifier"
     
    3342            dvar[-1].descriptor = "'" + str(variables.descriptor) + '_' + str(j + 1) + "'"
    3443
    3544            if isinstance(variables, uniform_uncertain):
    36                 if type(variables.lower) in [list, np.ndarray]:
    37                     dvar[-1].lower = variables.lower[j]
    38                 else:
    39                     dvar[-1].lower = variables.lower
    40                 if type(variables.upper) in [list, np.ndarray]:
    41                     dvar[-1].upper = variables.upper[j]
    42                 else:
    43                     dvar[-1].upper = variables.upper
     45                dvar[-1].lower = variables.lower[j]
     46                dvar[-1].upper = variables.upper[j]
    4447            elif isinstance(variables, normal_uncertain):
    45                 if type(variables.stddev) in [list, np.ndarray]:
    46                     dvar[-1].stddev = variables.stddev[j]
    47                 else:
    48                     dvar[-1].stddev = variables.stddev
    49                 if type(variables.mean) in [list, np.ndarray]:
    50                     dvar[-1].mean = variables.mean[j]
    51                 else:
    52                     dvar[-1].mean = variables.mean
     48                dvar[-1].stddev = variables.stddev[j]
     49                dvar[-1].mean = variables.mean[j]
    5350    else:
    5451        dvar.append(deepcopy(variables))
    5552
  • ../trunk-jpl/src/m/qmu/setupdesign/QmuSetupResponses.py

     
     1from copy import deepcopy
    12from MatlabFuncs import *
    2 from copy import deepcopy
     3from qmupart2npart import *
    34
    45
    56def QmuSetupResponses(md, dresp, responses):
     
    78    #get descriptor
    89    descriptor = responses.descriptor
    910
    10     #decide whether this is a distributed response, which will drive whether we expand it into npart values,
    11     #or if we just carry it forward as is.
     11    # Decide whether this is a distributed response, which will drive whether
     12    # we expand it into npart values, or if we just carry it forward as is.
    1213
    1314    #ok, key off according to type of descriptor:
    1415    if strncmp(descriptor, 'scaled_', 7):
    1516        #we have a scaled response, expand it over the partition.
    16         #ok, dealing with semi - discrete distributed response. Distribute according to how many
    17         #partitions we want
    18         for j in range(md.qmu.numberofpartitions):
     17
     18        # Ok, dealing with semi-discrete distributed response. Distribute
     19        # according to how many partitions we want.
     20        npart = qmupart2npart(responses.partition)
     21
     22        for j in range(npart):
    1923            dresp.append(deepcopy(responses))
    2024            dresp[-1].descriptor = str(responses.descriptor) + '_' + str(j + 1)
    2125    else:
  • ../trunk-jpl/src/m/qmu/preqmu.py

     
    11import os
    2 from MatlabFuncs import *
     2
     3from dakota_in_data import *
     4from expandresponses import *
    35from expandvariables import *
    4 from expandresponses import *
    56from helpers import *
    6 from dakota_in_data import *
     7from MatlabFuncs import *
    78from process_qmu_response_data import *
     9from qmupart2npart import *
    810
    911
    1012def preqmu(md, options):
     
    105107            responsedescriptors.append(fieldresponses[j].descriptor)
    106108    #}}}
    107109
     110    # Build a list of variable partitions
     111    variablepartitions = []
     112    variablepartitions_npart = []
     113    variable_fieldnames = fieldnames(md.qmu.variables)
     114    for i in range(len(variable_fieldnames)):
     115        field_name = variable_fieldnames[i]
     116        fieldvariable = getattr(md.qmu.variables, field_name)
     117        if fieldvariable.isscaled():
     118            variablepartitions.append(fieldvariable.partition)
     119            variablepartitions_npart.append(qmupart2npart(fieldvariable.partition))
     120        else:
     121            variablepartitions.append([])
     122            variablepartitions_npart.append(0)
     123
     124    # Build a list of response partitions
     125    responsepartitions = []
     126    responsepartitions_npart = []
     127    response_fieldnames = fieldnames(md.qmu.responses)
     128    for i in range(len(response_fieldnames)):
     129        field_name = response_fieldnames[i]
     130        fieldresponse = getattr(md.qmu.responses, field_name)
     131        if fieldresponse.isscaled():
     132            responsepartitions.append(fieldresponse.partition)
     133            responsepartitions_npart.append(qmupart2npart(fieldresponse.partition))
     134        else:
     135            responsepartitions.append([])
     136            responsepartitions_npart.append(0)
     137
    108138    # register the fields that will be needed by the Qmu model.
    109139    md.qmu.numberofresponses = numresponses
    110140    md.qmu.variabledescriptors = variabledescriptors
     141    md.qmu.variablepartitions = variablepartitions
     142    md.qmu.variablepartitions_npart = variablepartitions_npart
    111143    md.qmu.responsedescriptors = responsedescriptors
     144    md.qmu.responsepartitions = responsepartitions
     145    md.qmu.responsepartitions_npart = responsepartitions_npart
    112146
    113147    # now, we have to provide all the info necessary for the solutions to compute the
    114148    # responses. For ex, if mass_flux is a response, we need a profile of points.
Note: See TracBrowser for help on using the repository browser.