Changeset 25022


Ignore:
Timestamp:
06/11/20 20:34:52 (5 years ago)
Author:
jdquinn
Message:

CHG: More MATLAB to Python API changes for Dakota interface

Location:
issm/trunk-jpl
Files:
9 edited

Legend:

Unmodified
Added
Removed
  • issm/trunk-jpl/src/m/classes/qmu.py

    r25015 r25022  
    3434        self.responsepartitions = []
    3535        self.responsepartitions_npart = []
     36        self.responsepartitions_nt = []
    3637        self.mass_flux_profile_directory = float('NaN')
    3738        self.mass_flux_profiles = float('NaN')
     
    114115        s += "%s\n" % fielddisplay(self, 'variablepartitions', '')
    115116        s += "%s\n" % fielddisplay(self, 'variablepartitions_npart', '')
     117        s += "%s\n" % fielddisplay(self, 'variablepartitions_nt', '')
    116118        s += "%s\n" % fielddisplay(self, 'variabledescriptors', '')
    117119        s += "%s\n" % fielddisplay(self, 'responsedescriptors', '')
     
    174176        WriteData(fid, prefix, 'object', self, 'fieldname', 'variablepartitions', 'format', 'MatArray')
    175177        WriteData(fid, prefix, 'object', self, 'fieldname', 'variablepartitions_npart', 'format', 'IntMat', 'mattype', 3)
     178        WriteData(fid, prefix, 'object', self, 'fieldname', 'variablepartitions_nt', 'format', 'IntMat', 'mattype', 3)
    176179        WriteData(fid, prefix, 'object', self, 'fieldname', 'responsedescriptors', 'format', 'StringArray')
    177180        WriteData(fid, prefix, 'object', self, 'fieldname', 'responsepartitions', 'format', 'MatArray')
  • issm/trunk-jpl/src/m/classes/qmu/normal_uncertain.m

    r25019 r25022  
    3939                                npart=qmupart2npart(self.partition);
    4040                                if npart~=size(self.mean,1),
    41                                         error(['normal_uncertain constructor: for the scaled variable ' self.descriptor ' the row size of the mean field should be identifical to the number of partitions']);
     41                                        error(['normal_uncertain constructor: for the scaled variable ' self.descriptor ' the row size of the mean field should be identical to the number of partitions']);
    4242                                end
    4343                                if npart~=size(self.stddev,1),
    44                                         error(['normal_uncertain constructor: for the scaled variable ' self.descriptor ' the row size of the stddev field should be identifical to the number of partitions']);
     44                                        error(['normal_uncertain constructor: for the scaled variable ' self.descriptor ' the row size of the stddev field should be identical to the number of partitions']);
    4545                                end
    4646                                if self.nsteps~=size(self.mean,2),
    47                                         error(['normal_uncertain constructor: for the scaled variable ' self.descriptor ' the col size of the mean field should be identifical to the number of time steps']);
     47                                        error(['normal_uncertain constructor: for the scaled variable ' self.descriptor ' the col size of the mean field should be identical to the number of time steps']);
    4848                                end
    49                                 if npart~=size(self.stddev,1),
    50                                         error(['normal_uncertain constructor: for the scaled variable ' self.descriptor ' the col size of the stddev field should be identifical to the number of time steps']);
     49                                if self.nsteps~=size(self.stddev,2),
     50                                        error(['normal_uncertain constructor: for the scaled variable ' self.descriptor ' the col size of the stddev field should be identical to the number of time steps']);
    5151                                end
    5252
  • issm/trunk-jpl/src/m/classes/qmu/normal_uncertain.py

    r24873 r25022  
    66from pairoptions import pairoptions
    77from partition_npart import *
     8from qmupart2npart import qmupart2npart
    89
    910
     
    3637        self.stddev     = np.NaN
    3738        self.partition  = []
     39        self.nsteps     = 0
    3840
    3941    @staticmethod
     
    6163
    6264            #initialize fields:
    63             nuv.descriptor = options.getfieldvalue('descriptor', '')
    64             nuv.mean       = options.getfieldvalue('mean', np.NaN)
    65             nuv.stddev     = options.getfieldvalue('stddev', np.NaN)
     65            nuv.descriptor = options.getfieldvalue('descriptor')
     66            nuv.mean       = options.getfieldvalue('mean')
     67            nuv.stddev     = options.getfieldvalue('stddev')
    6668
    6769            #if the variable is scaled, a partition vector should have been supplied, and
     
    7072            if nuv.isscaled():
    7173                nuv.partition = options.getfieldvalue('partition')
    72                 npart = partition_npart(nuv.partition)
    73                 if npart != len(nuv.mean):
    74                     error("normal_uncertain constructor: for the scaled variable %s the mean field is not currently a vector of values for all the partitions described in the partition vector" % nuv.descriptor)
    75                 if npart != len(nuv.stddev):
    76                     error("normal_uncertain constructor: for the scaled variable %s the stddev field is not cureently a vector of values for all the partitions described in the partition vector" % nuv.descriptor)
     74                nuv.nsteps = options.getfieldvalue('nsteps', 1)
     75                npart = qmupart2npart(nuv.partition)
     76                if npart != nuv.mean.shape[0]:
     77                    raise RuntimeError("normal_uncertain constructor: for the scaled variable %s the row size of the mean field should be identical to the number of partitions" % nuv.descriptor)
     78                if npart != nuv.stddev.shape[0]:
     79                    raise RuntimeError("normal_uncertain constructor: for the scaled variable %s the row size of the stddev field should be identical to the number of partitions" % nuv.descriptor)
     80                if nuv.nsteps != nuv.mean.shape[1]:
     81                    raise RuntimeError("normal_uncertain constructor: for the scaled variable %s the col size of the mean field should be identical to the number of time steps" % nuv.descriptor)
     82                if nuv.nsteps != nuv.stddev.shape[1]:
     83                    raise RuntimeError("normal_uncertain constructor: for the scaled variable %s the col size of the stddev field should be identical to the number of time steps" % nuv.descriptor)
    7784
    7885        return [nuv] # Always return a list, so we have something akin to a MATLAB single row matrix
     
    8693        if self.partition != []:
    8794            string = "%s\n%s" % (string, fielddisplay(self, 'partition', 'partition vector defining where sampling will occur'))
     95        string = "%s\n%s" % (string, fielddisplay(self, 'nsteps', 'number of time steps'))
    8896
    8997        return string
     
    102110        if self.isscaled():
    103111            if self.partition == []:
    104                 error("normal_uncertain is a scaled variable, but it's missing a partition vector")
     112                raise RuntimeError("normal_uncertain is a scaled variable, but it's missing a partition vector")
    105113            #better have a partition vector that has as many partitions as stddev's size:
    106             if len(self.stddev) != partition_npart(self.partititon):
    107                 error("normal_uncertain error message: stddev and partition should be vectors of identical size")
    108             if len(self.mean) != partition_npart(self.partition):
    109                 error("normal_uncertain error message: mean and partition should be vectors of identical size")
     114            if self.stddev.shape[0] != partition_npart(self.partition):
     115                raise RuntimeError("normal_uncertain error message: row size of stddev and partition size should be identical")
     116            if self.mean.shape[0] != partition_npart(self.partition):
     117                raise RuntimeError("normal_uncertain error message: row size of mean and partition size should be identical")
     118            #we need as many steps in stddev and mean as there are in time steps
     119            if self.stddev.shape[1] != self.nsteps:
     120                raise RuntimeError("normal_uncertain error message: col size of stddev and number of time steps should be identical")
     121            if self.mean.shape[1] != self.nsteps:
     122                raise RuntimeError("normal_uncertain error message: col size of mean and number of time steps should be identical")
    110123            md = checkfield(md, 'field', self.partition, 'fieldname', 'normal_uncertain.partition', 'NaN', 1, 'Inf', 1, '>=', -1, 'numel', [md.mesh.numberofvertices, md.mesh.numberofvertices])
    111124            if self.partition.shape[1] > 1:
    112                 error("normal_uncertain error message: partition should be a column vector")
     125                raise RuntimeError("normal_uncertain error message: partition should be a column vector")
    113126            partcheck = np.unique(self.partition)
    114127            partmin = min(partcheck)
    115128            partmax = max(partcheck)
    116129            if partmax < -1:
    117                 error("normal_uncertain error message: partition vector's min value should be -1 (for no partition), or start at 0")
     130                raise RuntimeError("normal_uncertain error message: partition vector's min value should be -1 (for no partition), or start at 0")
    118131            nmax = max(md.mesh.numberofelements, md.mesh.numberofvertices)
    119132            if partmax > nmax:
    120                 error("normal_uncertain error message: partition vector's values cannot go over the number of vertices or elements")
     133                raise RuntimeError("normal_uncertain error message: partition vector's values cannot go over the number of vertices or elements")
    121134    #}}}
    122135
  • issm/trunk-jpl/src/m/partition/AreaAverageOntoPartition.py

    r24213 r25022  
    11import numpy as np
     2
     3from adjacency import adjacency
    24import copy
    3 from adjacency import adjacency
    45from project2d import project2d
     6from qmupart2npart import qmupart2npart
    57
    68
    7 def AreaAverageOntoPartition(md, vector, layer=None):
    8     '''AREAAVERAGEONTOPARTITION
    9    compute partition values for a certain vector expressed on the vertices of the mesh.
    10    Use area weighted average.
     9def AreaAverageOntoPartition(md, vector, layer=None, partition=None):
     10    '''
     11    AREAAVERAGEONTOPARTITION - compute partition values for a certain vector expressed on the vertices of the mesh.
     12    Use area weighted average.
    1113
    12    Usage:
    13       average = AreaAverageOntoPartition(md, vector)
    14       average = AreaAverageOntoPartition(md, vector, layer)  #if in 3D, chose which layer is partitioned
    15 '''
     14    Usage:
     15        average = AreaAverageOntoPartition(md, vector)
     16        average = AreaAverageOntoPartition(md, vector, layer) # If in 3D, chose which layer is partitioned
     17    '''
     18
    1619    #some checks
    1720    if(md.mesh.dimension() == 3):
     
    3538    #finally, project vector:
    3639        vector = project2d(md3d, vector, layer)
    37         md.qmu.vpartition = project2d(md3d, md3d.qmu.vpartition, layer)
     40        partition = project2d(md3d, partition, layer)
    3841
    3942    #ok, first check that part is Matlab indexed
    40     part = (md.qmu.vpartition).copy()
     43    part = partition.copy()
    4144    part = part.flatten() + 1
    4245
    4346    #some check:
    44     if md.qmu.numberofpartitions != max(part):
    45         raise RuntimeError('AreaAverageOntoPartition error message: ''npart'' should be equal to max(md.qmu.vpartition)')
     47    npart = qmupart2npart(partition)
     48    if npart != max(part):
     49        raise RuntimeError('AreaAverageOntoPartition error message: ''npart'' should be equal to max(partition)')
    4650
    4751    #initialize output
  • issm/trunk-jpl/src/m/qmu/preqmu.m

    r25020 r25022  
    5959                        npart=partition_npart(fieldresponses(j).partition);
    6060                        if str2int(fieldresponses(j).descriptor,'last')>npart,
    61                                 error('preqmu error message: one of the expanded responses has more values than the number of partitions (setup in md.qmu.numberofpartitions)');
     61                                error('preqmu error message: one of the expanded responses has more values than the number of partitions');
    6262                        end
    6363                end
     
    9191
    9292%build a MatArray of variable partitions:
    93 variable_fieldnames=fieldnames(md.qmu.variables(ivar));
    9493variablepartitions={};
    9594variablepartitions_npart=[];
    9695variablepartitions_nt=[];
     96variable_fieldnames=fieldnames(md.qmu.variables(ivar));
    9797for i=1:length(variable_fieldnames),
    9898        field_name=variable_fieldnames{i};
     
    110110
    111111%build a MatArray of response partitions:
    112 response_fieldnames=fieldnames(md.qmu.responses(ivar));
    113112responsepartitions={};
    114113responsepartitions_npart=[];
     114response_fieldnames=fieldnames(md.qmu.responses(iresp));
    115115for i=1:length(response_fieldnames),
    116116        field_name=response_fieldnames{i};
    117         fieldresponse=md.qmu.responses(ivar).(field_name);
     117        fieldresponse=md.qmu.responses(iresp).(field_name);
    118118        if fieldresponse.isscaled();
    119119                responsepartitions{end+1}=fieldresponse.partition;
  • issm/trunk-jpl/src/m/qmu/preqmu.py

    r25015 r25022  
    6161        for j in range(np.size(fieldvariables)):
    6262            if strncmpi(fieldvariables[j].descriptor, 'scaled_', 7):
    63                 npart = partition_npart(fieldvariables[j].partition)
    64                 if str2int(fieldvariables[j].descriptor, 'last') > npart:
    65                     raise RuntimeError('preqmu error message: one of the expanded variables has more values than the number of partitions (setup in md.qmu.numberofpartitions)')
     63                npart = qmupart2npart(fieldvariables[j].partition)
     64                nt = fieldvariables[j].nsteps
     65                if nt == 1:
     66                    if str2int(fieldvariables[j].descriptor, 'last') > npart:
     67                        raise RuntimeError('preqmu error message: one of the expanded variables has more values than the number of partitions')
    6668        numvariables = numvariables + np.size(vars(variables)[field_name])
    6769
     
    7476            if strncmpi(fieldresponses[j].descriptor, 'scaled_', 7):
    7577                npart = partition_npart(fieldresponses[j].partition)
    76                 if str2int(fieldresponses[j].descriptor, 'last') > md.qmu.numberofpartitions:
    77                     raise RuntimeError('preqmu error message: one of the expanded responses has more values than the number of partitions (setup in md.qmu.numberofpartitions)')
     78                if str2int(fieldresponses[j].descriptor, 'last') > npart:
     79                    raise RuntimeError('preqmu error message: one of the expanded responses has more values than the number of partitions')
    7880        numresponses = numresponses + np.size(vars(responses)[field_name])
    7981    #}}}
     
    112114    #}}}
    113115
    114     # Build a list of variable partitions
     116    #build a list of variable partitions
    115117    variablepartitions = []
    116118    variablepartitions_npart = []
     119    variablepartitions_nt = []
    117120    variable_fieldnames = fieldnames(md.qmu.variables)
    118121    for i in range(len(variable_fieldnames)):
     
    124127                    variablepartitions.append(fieldvariable[j].partition)
    125128                    variablepartitions_npart.append(qmupart2npart(fieldvariable[j].partition))
     129                    variablepartitions_nt.append(fieldvariable.nsteps)
    126130                else:
    127131                    variablepartitions.append([])
    128132                    variablepartitions_npart.append(0)
     133                    variablepartitions_nt.append(1)
    129134        else:
    130135            if fieldvariable.isscaled():
    131136                variablepartitions.append(fieldvariable.partition)
    132137                variablepartitions_npart.append(qmupart2npart(fieldvariable.partition))
     138                variablepartitions_nt.append(fieldvariable.nsteps)
    133139            else:
    134140                variablepartitions.append([])
    135141                variablepartitions_npart.append(0)
     142                variablepartitions_nt.append(1)
    136143
    137     # Build a list of response partitions
     144    #build a list of response partitions
    138145    responsepartitions = []
    139146    responsepartitions_npart = []
     
    146153                if fieldresponse[j].isscaled():
    147154                    responsepartitions.append(fieldresponse[j].partition)
    148                     responsepartitions_npart.append(qmupart2npart(fieldresponse.partition))
     155                    responsepartitions_npart.append(qmupart2npart(fieldresponse[j].partition))
    149156                else:
    150157                    responsepartitions.append([])
     
    163170    md.qmu.variablepartitions = variablepartitions
    164171    md.qmu.variablepartitions_npart = variablepartitions_npart
     172    md.qmu.variablepartitions_nt = variablepartitions_nt
    165173    md.qmu.responsedescriptors = responsedescriptors
    166174    md.qmu.responsepartitions = responsepartitions
  • issm/trunk-jpl/src/m/qmu/setupdesign/QmuSetupVariables.m

    r25021 r25022  
    2525                nmean=size(variables.mean,1);
    2626                if (nstddev ~= npart || nmean ~=npart),
    27                         error('QmuSetupVariables error message: stddev and mean fields should be row sized as the number of partitions');
     27                        error('QmuSetupVariables error message: stddev and mean fields should have the same number of rows as the number of partitions');
    2828                end
    2929                nstddev=size(variables.stddev,2);
    3030                nmean=size(variables.mean,2);
    3131                if (nstddev ~= nt || nmean ~=nt),
    32                         error('QmuSetupVariables error message: stddev and mean fields should be col sized as the number of time steps');
     32                        error('QmuSetupVariables error message: stddev and mean fields should have the same number of cols as the number of time steps');
    3333                end
    3434
  • issm/trunk-jpl/src/m/qmu/setupdesign/QmuSetupVariables.py

    r25014 r25022  
    2121        partition = variables.partition
    2222        #figure out number of partitions
    23         npart=qmupart2npart(partition)
     23        npart = qmupart2npart(partition)
     24        #figure out number of time steps
     25        nt = variables.nsteps
    2426
    2527        if isinstance(variables, uniform_uncertain):
    26             nlower=len(variables.lower)
    27             nupper=len(variables.upper)
     28            nlower = len(variables.lower)
     29            nupper = len(variables.upper)
    2830            if nlower != npart or nupper != npart:
    2931                raise RuntimeError('QmuSetupVariables error message: upper and lower fields should be same size as the number of partitions')
    3032        elif isinstance(variables, normal_uncertain):
    31             nstddev=len(variables.stddev)
    32             nmean=len(variables.mean)
     33            nstddev = variables.stddev.shape[0]
     34            nmean = variables.mean.shape[0]
    3335            if nstddev != npart or nmean != npart:
    34                 raise RuntimeError('QmuSetupVariables error message: stddev and mean fields should be same size as the number of partitions')
     36                raise RuntimeError('QmuSetupVariables error message: stddev and mean fields should have the same number of rows as the number of partitions')
     37            nstddev = variables.stddev.shape[1]
     38            nmean = variables.mean.shape[1]
     39            if nstddev != nt or nmean != nt:
     40                raise RuntimeError('QmuSetupVariables error message: stddev and mean fields should have the same number of cols as the number of time steps')
    3541
    3642        #ok, dealing with semi-discrete distributed variable. Distribute according to how many
    37         #partitions we want
    38         for j in range(npart):
    39             dvar.append(deepcopy(variables))
     43        #partitions we want, and number of time steps
     44        if nt == 1:
     45            for j in range(npart):
     46                dvar.append(deepcopy(variables))
    4047
    41             # text parsing in dakota requires literal "'identifier'" not just "identifier"
    42             dvar[-1].descriptor = "'" + str(variables.descriptor) + '_' + str(j + 1) + "'"
     48                # text parsing in dakota requires literal "'identifier'" not just "identifier"
     49                dvar[-1].descriptor = "'" + str(variables.descriptor) + '_' + str(j + 1) + "'"
    4350
    44             if isinstance(variables, uniform_uncertain):
    45                 dvar[-1].lower = variables.lower[j]
    46                 dvar[-1].upper = variables.upper[j]
    47             elif isinstance(variables, normal_uncertain):
    48                 dvar[-1].stddev = variables.stddev[j]
    49                 dvar[-1].mean = variables.mean[j]
     51                if isinstance(variables, uniform_uncertain):
     52                    dvar[-1].lower = variables.lower[j]
     53                    dvar[-1].upper = variables.upper[j]
     54                elif isinstance(variables, normal_uncertain):
     55                    dvar[-1].stddev = variables.stddev[j]
     56                    dvar[-1].mean = variables.mean[j]
     57        else:
     58            for j in range(npart):
     59                for k in range(nt):
     60                    dvar.append(deepcopy(variables))
     61
     62                    # text parsing in dakota requires literal "'identifier'" not just "identifier"
     63                    dvar[-1].descriptor = "'" + str(variables.descriptor) + '_' + str(j + 1) + '_' + str(k + 1) + "'"
     64
     65                    if isinstance(variables, uniform_uncertain):
     66                        dvar[-1].lower = variables.lower[j][k]
     67                        dvar[-1].upper = variables.upper[j][k]
     68                    elif isinstance(variables, normal_uncertain):
     69                        dvar[-1].stddev = variables.stddev[j][k]
     70                        dvar[-1].mean = variables.mean[j][k]
    5071    else:
    5172        dvar.append(deepcopy(variables))
  • issm/trunk-jpl/test/NightlyRun/test412.py

    r25010 r25022  
    1919
    2020#partitioning
    21 md = partitioner(md, 'package', 'linear', 'npart', md.mesh.numberofvertices) - 1
     21npart = md.mesh.numberofvertices
     22partition = partitioner(md, 'package', 'linear', 'npart', npart) - 1
    2223md.qmu.isdakota = 1
    2324
     
    3637md.qmu.variables.drag_coefficient = normal_uncertain.normal_uncertain(
    3738    'descriptor', 'scaled_FrictionCoefficient',
    38     'mean', np.ones(md.mesh.numberofvertices),
    39     'stddev', .01 * np.ones(md.mesh.numberofvertices),
     39    'mean', np.ones(npart),
     40    'stddev', .01 * np.ones(npart),
    4041    'partition', partition
    4142    )
Note: See TracChangeset for help on using the changeset viewer.