Changeset 25022
Timestamp: 06/11/20 20:34:52 (5 years ago)
Location: issm/trunk-jpl
Files: 9 edited
Legend:
Unchanged lines are shown for context; lines prefixed with + were added and lines prefixed with - were removed.
issm/trunk-jpl/src/m/classes/qmu.py
r25015 → r25022

         self.responsepartitions = []
         self.responsepartitions_npart = []
+        self.responsepartitions_nt = []
         self.mass_flux_profile_directory = float('NaN')
         self.mass_flux_profiles = float('NaN')
…
         s += "%s\n" % fielddisplay(self, 'variablepartitions', '')
         s += "%s\n" % fielddisplay(self, 'variablepartitions_npart', '')
+        s += "%s\n" % fielddisplay(self, 'variablepartitions_nt', '')
         s += "%s\n" % fielddisplay(self, 'variabledescriptors', '')
         s += "%s\n" % fielddisplay(self, 'responsedescriptors', '')
…
         WriteData(fid, prefix, 'object', self, 'fieldname', 'variablepartitions', 'format', 'MatArray')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'variablepartitions_npart', 'format', 'IntMat', 'mattype', 3)
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'variablepartitions_nt', 'format', 'IntMat', 'mattype', 3)
         WriteData(fid, prefix, 'object', self, 'fieldname', 'responsedescriptors', 'format', 'StringArray')
         WriteData(fid, prefix, 'object', self, 'fieldname', 'responsepartitions', 'format', 'MatArray')
issm/trunk-jpl/src/m/classes/qmu/normal_uncertain.m
r25019 → r25022

     npart=qmupart2npart(self.partition);
     if npart~=size(self.mean,1),
-        error(['normal_uncertain constructor: for the scaled variable ' self.descriptor ' the row size of the mean field should be identifical to the number of partitions']);
+        error(['normal_uncertain constructor: for the scaled variable ' self.descriptor ' the row size of the mean field should be identical to the number of partitions']);
     end
     if npart~=size(self.stddev,1),
-        error(['normal_uncertain constructor: for the scaled variable ' self.descriptor ' the row size of the stddev field should be identifical to the number of partitions']);
+        error(['normal_uncertain constructor: for the scaled variable ' self.descriptor ' the row size of the stddev field should be identical to the number of partitions']);
     end
     if self.nsteps~=size(self.mean,2),
-        error(['normal_uncertain constructor: for the scaled variable ' self.descriptor ' the col size of the mean field should be identifical to the number of time steps']);
+        error(['normal_uncertain constructor: for the scaled variable ' self.descriptor ' the col size of the mean field should be identical to the number of time steps']);
     end
-    if npart~=size(self.stddev,1),
-        error(['normal_uncertain constructor: for the scaled variable ' self.descriptor ' the col size of the stddev field should be identifical to the number of time steps']);
+    if self.nsteps~=size(self.stddev,2),
+        error(['normal_uncertain constructor: for the scaled variable ' self.descriptor ' the col size of the stddev field should be identical to the number of time steps']);
     end
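The rule these constructor checks enforce — mean and stddev carry one row per partition and one column per time step — can be exercised outside ISSM with a few lines of NumPy. This is a minimal sketch; the sizes are made up for illustration and the code is Python even though the file above is MATLAB.

    import numpy as np

    npart, nsteps = 5, 3                      # hypothetical partition and time-step counts
    mean = np.ones((npart, nsteps))           # one row per partition, one column per time step
    stddev = 0.01 * np.ones((npart, nsteps))

    # Same consistency rule as the r25022 constructors:
    assert mean.shape[0] == npart and stddev.shape[0] == npart, 'row size must match the number of partitions'
    assert mean.shape[1] == nsteps and stddev.shape[1] == nsteps, 'col size must match the number of time steps'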
issm/trunk-jpl/src/m/classes/qmu/normal_uncertain.py
r24873 → r25022

 from pairoptions import pairoptions
 from partition_npart import *
+from qmupart2npart import qmupart2npart
…
         self.stddev = np.NaN
         self.partition = []
+        self.nsteps = 0

     @staticmethod
…
         #initialize fields:
-        nuv.descriptor = options.getfieldvalue('descriptor', '')
-        nuv.mean = options.getfieldvalue('mean', np.NaN)
-        nuv.stddev = options.getfieldvalue('stddev', np.NaN)
+        nuv.descriptor = options.getfieldvalue('descriptor')
+        nuv.mean = options.getfieldvalue('mean')
+        nuv.stddev = options.getfieldvalue('stddev')

         #if the variable is scaled, a partition vector should have been supplied, and
…
         if nuv.isscaled():
             nuv.partition = options.getfieldvalue('partition')
-            npart = partition_npart(nuv.partition)
-            if npart != len(nuv.mean):
-                error("normal_uncertain constructor: for the scaled variable %s the mean field is not currently a vector of values for all the partitions described in the partition vector" % nuv.descriptor)
-            if npart != len(nuv.stddev):
-                error("normal_uncertain constructor: for the scaled variable %s the stddev field is not cureently a vector of values for all the partitions described in the partition vector" % nuv.descriptor)
+            nuv.nsteps = options.getfieldvalue('nsteps', 1)
+            npart = qmupart2npart(nuv.partition)
+            if npart != nuv.mean.shape[0]:
+                raise RuntimeError("normal_uncertain constructor: for the scaled variable %s the row size of the mean field should be identical to the number of partitions" % nuv.descriptor)
+            if npart != nuv.stddev.shape[0]:
+                raise RuntimeError("normal_uncertain constructor: for the scaled variable %s the row size of the stddev field should be identical to the number of partitions" % nuv.descriptor)
+            if nuv.nsteps != nuv.mean.shape[1]:
+                raise RuntimeError("normal_uncertain constructor: for the scaled variable %s the col size of the mean field should be identical to the number of time steps" % nuv.descriptor)
+            if nuv.nsteps != nuv.stddev.shape[1]:
+                raise RuntimeError("normal_uncertain constructor: for the scaled variable %s the col size of the stddev field should be identical to the number of time steps" % nuv.descriptor)

         return [nuv]  # Always return a list, so we have something akin to a MATLAB single row matrix
…
         if self.partition != []:
             string = "%s\n%s" % (string, fielddisplay(self, 'partition', 'partition vector defining where sampling will occur'))
+            string = "%s\n%s" % (string, fielddisplay(self, 'nsteps', 'number of time steps'))

         return string
…
         if self.isscaled():
             if self.partition == []:
-                error("normal_uncertain is a scaled variable, but it's missing a partition vector")
+                raise RuntimeError("normal_uncertain is a scaled variable, but it's missing a partition vector")
             #better have a partition vector that has as many partitions as stddev's size:
-            if len(self.stddev) != partition_npart(self.partititon):
-                error("normal_uncertain error message: stddev and partition should be vectors of identical size")
-            if len(self.mean) != partition_npart(self.partition):
-                error("normal_uncertain error message: mean and partition should be vectors of identical size")
+            if self.stddev.shape[0] != partition_npart(self.partititon):
+                raise RuntimeError("normal_uncertain error message: row size of stddev and partition size should be identical")
+            if self.mean.shape[0] != partition_npart(self.partition):
+                raise RuntimeError("normal_uncertain error message: row size of mean and partition size should be identical")
+            #we need as many steps in stddev and mean as there are in time steps
+            if self.stddev.shape[1] != self.nsteps:
+                raise RuntimeError("normal_uncertain error message: col size of stddev and partition size should be identical")
+            if self.mean.shape[1] != self.nsteps:
+                raise RuntimeError("normal_uncertain error message: col size of mean and partition size should be identical")
             md = checkfield(md, 'field', self.partition, 'fieldname', 'normal_uncertain.partition', 'NaN', 1, 'Inf', 1, '>=', -1, 'numel', [md.mesh.numberofvertices, md.mesh.numberofvertices])
             if self.partition.shape[1] > 1:
-                error("normal_uncertain error message: partition should be a column vector")
+                raise RuntimeError("normal_uncertain error message: partition should be a column vector")
             partcheck = np.unique(self.partition)
             partmin = min(partcheck)
             partmax = max(partcheck)
             if partmax < -1:
-                error("normal_uncertain error message: partition vector's min value should be -1 (for no partition), or start at 0")
+                raise RuntimeError("normal_uncertain error message: partition vector's min value should be -1 (for no partition), or start at 0")
             nmax = max(md.mesh.numberofelements, md.mesh.numberofvertices)
             if partmax > nmax:
-                error("normal_uncertain error message: partition vector's values cannot go over the number of vertices or elements")
+                raise RuntimeError("normal_uncertain error message: partition vector's values cannot go over the number of vertices or elements")
             #}}}
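Put together, a time-dependent scaled variable would be built roughly as follows. This is a hedged sketch: the option names ('descriptor', 'mean', 'stddev', 'partition', 'nsteps') come from the diff above, the import follows the usual ISSM pattern and is an assumption, and the partition vector is a placeholder rather than the output of partitioner().

    import numpy as np
    from normal_uncertain import normal_uncertain  # assumed import path, as in ISSM test scripts

    npart = 5    # hypothetical number of partitions
    nsteps = 3   # hypothetical number of time steps

    # Placeholder partition vector: one 0-based partition id per vertex (normally partitioner(...) - 1).
    partition = np.array([[0], [0], [1], [2], [3], [4], [4]])

    # The factory returns a one-element list, mirroring MATLAB's single-row matrix convention.
    nuv = normal_uncertain.normal_uncertain(
        'descriptor', 'scaled_FrictionCoefficient',
        'mean', np.ones((npart, nsteps)),          # npart rows, nsteps columns
        'stddev', .01 * np.ones((npart, nsteps)),
        'partition', partition,
        'nsteps', nsteps)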
issm/trunk-jpl/src/m/partition/AreaAverageOntoPartition.py
r24213 → r25022

 import numpy as np
+
+from adjacency import adjacency
 import copy
-from adjacency import adjacency
 from project2d import project2d
+from qmupart2npart import qmupart2npart


-def AreaAverageOntoPartition(md, vector, layer=None):
-    '''AREAAVERAGEONTOPARTITION
-       compute partition values for a certain vector expressed on the vertices of the mesh.
-       Use area weighted average.
-
-    Usage:
-        average = AreaAverageOntoPartition(md, vector)
-        average = AreaAverageOntoPartition(md, vector, layer) #if in 3D, chose which layer is partitioned
-    '''
+def AreaAverageOntoPartition(md, vector, layer=None, partition):
+    '''
+    AREAAVERAGEONTOPARTITION - compute partition values for a certain vector expressed on the vertices of the mesh.
+        Use area weighted average.
+
+    Usage:
+        average = AreaAverageOntoPartition(md, vector)
+        average = AreaAverageOntoPartition(md, vector, layer)  # If in 3D, chose which layer is partitioned
+    '''
+
    #some checks
    if(md.mesh.dimension() == 3):
…
        #finally, project vector:
        vector = project2d(md3d, vector, layer)
-       md.qmu.vpartition = project2d(md3d, md3d.qmu.vpartition, layer)
+       partition = project2d(md3d, partition, layer)

    #ok, first check that part is Matlab indexed
-   part = (md.qmu.vpartition).copy()
+   part = partition.copy()
    part = part.flatten() + 1

    #some check:
-   if md.qmu.numberofpartitions != max(part):
-       raise RuntimeError('AreaAverageOntoPartition error message: ''npart'' should be equal to max(md.qmu.vpartition)')
+   npart = qmupart2npart(partition)
+   if npart != max(part):
+       raise RuntimeError('AreaAverageOntoPartition error message: ''npart'' should be equal to max(partition)')

    #initialize output
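Stripped of the mesh machinery, the operation reduces to a weighted group-average of vertex values by partition id. The sketch below is a simplified standalone version (uniform weights stand in for the real vertex areas) and is not the ISSM implementation.

    import numpy as np

    def area_average_onto_partition(values, partition, weights=None):
        """Average vertex values into one value per partition (simplified sketch)."""
        values = np.asarray(values, dtype=float)
        partition = np.asarray(partition, dtype=int).flatten()  # 0-based partition id per vertex
        if weights is None:
            weights = np.ones_like(values)                      # ISSM would use vertex areas here
        npart = int(partition.max()) + 1
        average = np.zeros(npart)
        for p in range(npart):
            mask = partition == p
            average[p] = np.sum(weights[mask] * values[mask]) / np.sum(weights[mask])
        return average

    # Six vertices split into two partitions:
    print(area_average_onto_partition([1., 2., 3., 4., 5., 6.], [0, 0, 0, 1, 1, 1]))  # [2. 5.]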
issm/trunk-jpl/src/m/qmu/preqmu.m
r25020 → r25022

     npart=partition_npart(fieldresponses(j).partition);
     if str2int(fieldresponses(j).descriptor,'last')>npart,
-        error('preqmu error message: one of the expanded responses has more values than the number of partitions (setup in md.qmu.numberofpartitions)');
+        error('preqmu error message: one of the expanded responses has more values than the number of partitions');
     end
 end
…

 %build a MatArray of variable partitions:
-variable_fieldnames=fieldnames(md.qmu.variables(ivar));
 variablepartitions={};
 variablepartitions_npart=[];
 variablepartitions_nt=[];
+variable_fieldnames=fieldnames(md.qmu.variables(ivar));
 for i=1:length(variable_fieldnames),
     field_name=variable_fieldnames{i};
…

 %build a MatArray of response partitions:
-response_fieldnames=fieldnames(md.qmu.responses(ivar));
 responsepartitions={};
 responsepartitions_npart=[];
+response_fieldnames=fieldnames(md.qmu.responses(iresp));
 for i=1:length(response_fieldnames),
     field_name=response_fieldnames{i};
-    fieldresponse=md.qmu.responses(ivar).(field_name);
+    fieldresponse=md.qmu.responses(iresp).(field_name);
     if fieldresponse.isscaled();
         responsepartitions{end+1}=fieldresponse.partition;
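The check above compares the trailing index of an expanded descriptor (str2int(descriptor, 'last') in the MATLAB code) against the partition count. Below is a standalone Python sketch of the same idea, with a regular expression standing in for str2int and made-up names.

    import re

    def trailing_int(descriptor):
        """Return the integer at the end of a descriptor such as 'scaled_MassFlux_12' (0 if none)."""
        m = re.search(r'(\d+)$', descriptor)
        return int(m.group(1)) if m else 0

    npart = 12                            # hypothetical number of partitions for this response
    descriptor = 'scaled_MassFlux_7'      # hypothetical expanded response descriptor
    if trailing_int(descriptor) > npart:
        raise RuntimeError('preqmu error message: one of the expanded responses has more values than the number of partitions')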
issm/trunk-jpl/src/m/qmu/preqmu.py
r25015 → r25022

 for j in range(np.size(fieldvariables)):
     if strncmpi(fieldvariables[j].descriptor, 'scaled_', 7):
-        npart = partition_npart(fieldvariables[j].partition)
-        if str2int(fieldvariables[j].descriptor, 'last') > npart:
-            raise RuntimeError('preqmu error message: one of the expanded variables has more values than the number of partitions (setup in md.qmu.numberofpartitions)')
+        npart = qmupart2npart(fieldvariables[j].partition)
+        nt = fieldvariables[j].nsteps
+        if nt == 1:
+            if str2int(fieldvariables[j].descriptor, 'last') > npart:
+                raise RuntimeError('preqmu error message: one of the expanded variables has more values than the number of partitions')
     numvariables = numvariables + np.size(vars(variables)[field_name])
…
     if strncmpi(fieldresponses[j].descriptor, 'scaled_', 7):
         npart = partition_npart(fieldresponses[j].partition)
-        if str2int(fieldresponses[j].descriptor, 'last') > md.qmu.numberofpartitions:
-            raise RuntimeError('preqmu error message: one of the expanded responses has more values than the number of partitions (setup in md.qmu.numberofpartitions)')
+        if str2int(fieldresponses[j].descriptor, 'last') > npart:
+            raise RuntimeError('preqmu error message: one of the expanded responses has more values than the number of partitions')
     numresponses = numresponses + np.size(vars(responses)[field_name])
 #}}}
…
 #}}}

-# Build a list of variable partitions
+#build a list of variable partitions
 variablepartitions = []
 variablepartitions_npart = []
+variablepartitions_nt = []
 variable_fieldnames = fieldnames(md.qmu.variables)
 for i in range(len(variable_fieldnames)):
…
             variablepartitions.append(fieldvariable[j].partition)
             variablepartitions_npart.append(qmupart2npart(fieldvariable[j].partition))
+            variablepartitions_nt.append(fieldvariable.nsteps)
         else:
             variablepartitions.append([])
             variablepartitions_npart.append(0)
+            variablepartitions_nt.append(1)
     else:
         if fieldvariable.isscaled():
             variablepartitions.append(fieldvariable.partition)
             variablepartitions_npart.append(qmupart2npart(fieldvariable.partition))
+            variablepartitions_nt.append(fieldvariable.nsteps)
         else:
             variablepartitions.append([])
             variablepartitions_npart.append(0)
+            variablepartitions_nt.append(1)

-# Build a list of response partitions
+#build a list of response partitions
 responsepartitions = []
 responsepartitions_npart = []
…
         if fieldresponse[j].isscaled():
             responsepartitions.append(fieldresponse[j].partition)
-            responsepartitions_npart.append(qmupart2npart(fieldresponse.partition))
+            responsepartitions_npart.append(qmupart2npart(fieldresponse[j].partition))
         else:
             responsepartitions.append([])
             responsepartitions_npart.append(0)
…
 md.qmu.variablepartitions = variablepartitions
 md.qmu.variablepartitions_npart = variablepartitions_npart
+md.qmu.variablepartitions_nt = variablepartitions_nt
 md.qmu.responsedescriptors = responsedescriptors
 md.qmu.responsepartitions = responsepartitions
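The bookkeeping added here keeps three parallel lists — the partition vector, partition count, and time-step count of each variable — with placeholders for variables that are not scaled. A standalone sketch of that pattern follows; the classes and numbers are illustrative stand-ins, not ISSM's.

    import numpy as np

    class ScaledVar:                        # stand-in for a 'scaled_' uncertain variable
        def __init__(self, partition, nsteps=1):
            self.partition = partition
            self.nsteps = nsteps
        def isscaled(self):
            return True

    class PlainVar:                         # stand-in for a non-scaled uncertain variable
        def isscaled(self):
            return False

    def qmupart2npart(partition):
        return int(np.max(partition)) + 1   # partition ids are 0-based

    variables = [ScaledVar(np.array([0, 0, 1, 1, 2]), nsteps=4), PlainVar()]

    variablepartitions, variablepartitions_npart, variablepartitions_nt = [], [], []
    for v in variables:
        if v.isscaled():
            variablepartitions.append(v.partition)
            variablepartitions_npart.append(qmupart2npart(v.partition))
            variablepartitions_nt.append(v.nsteps)
        else:
            variablepartitions.append([])
            variablepartitions_npart.append(0)
            variablepartitions_nt.append(1)

    print(variablepartitions_npart, variablepartitions_nt)  # [3, 0] [4, 1]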
issm/trunk-jpl/src/m/qmu/setupdesign/QmuSetupVariables.m
r25021 → r25022

     nmean=size(variables.mean,1);
     if (nstddev ~= npart || nmean ~=npart),
-        error('QmuSetupVariables error message: stddev and mean fields should be row sized as the number of partitions');
+        error('QmuSetupVariables error message: stddev and mean fields should have the same number of rows as the number of partitions');
     end
     nstddev=size(variables.stddev,2);
     nmean=size(variables.mean,2);
     if (nstddev ~= nt || nmean ~=nt),
-        error('QmuSetupVariables error message: stddev and mean fields should be col sized as the number of time steps');
+        error('QmuSetupVariables error message: stddev and mean fields should have the same number of cols as the number of time steps');
     end
issm/trunk-jpl/src/m/qmu/setupdesign/QmuSetupVariables.py
r25014 → r25022

     partition = variables.partition
     #figure out number of partitions
-    npart=qmupart2npart(partition)
+    npart = qmupart2npart(partition)
+    #figure out number of time steps
+    nt = variables.nsteps

 if isinstance(variables, uniform_uncertain):
-    nlower =len(variables.lower)
-    nupper =len(variables.upper)
+    nlower = len(variables.lower)
+    nupper = len(variables.upper)
     if nlower != npart or nupper != npart:
         raise RuntimeError('QmuSetupVariables error message: upper and lower fields should be same size as the number of partitions')
 elif isinstance(variables, normal_uncertain):
-    nstddev =len(variables.stddev)
-    nmean =len(variables.mean)
+    nstddev = variables.stddev.shape[0]
+    nmean = variables.mean.shape[0]
     if nstddev != npart or nmean != npart:
-        raise RuntimeError('QmuSetupVariables error message: stddev and mean fields should be same size as the number of partitions')
+        raise RuntimeError('QmuSetupVariables error message: stddev and mean fields should have the same number of rows as the number of partitions')
+    nstddev = variables.stddev.shape[1]
+    nmean = variables.mean.shape[1]
+    if nstddev != nt or nmean != nt:
+        raise RuntimeError('QmuSetupVariables error message: stddev and mean fields should have the same number of cols as the number of partitions')

 #ok, dealing with semi-discrete distributed variable. Distribute according to how many
-#partitions we want
-for j in range(npart):
-    dvar.append(deepcopy(variables))
+#partitions we want, and number of time steps
+if nt == 1:
+    for j in range(npart):
+        dvar.append(deepcopy(variables))

-    # text parsing in dakota requires literal "'identifier'" not just "identifier"
-    dvar[-1].descriptor = "'" + str(variables.descriptor) + '_' + str(j + 1) + "'"
+        # text parsing in dakota requires literal "'identifier'" not just "identifier"
+        dvar[-1].descriptor = "'" + str(variables.descriptor) + '_' + str(j + 1) + "'"

-    if isinstance(variables, uniform_uncertain):
-        dvar[-1].lower = variables.lower[j]
-        dvar[-1].upper = variables.upper[j]
-    elif isinstance(variables, normal_uncertain):
-        dvar[-1].stddev = variables.stddev[j]
-        dvar[-1].mean = variables.mean[j]
+        if isinstance(variables, uniform_uncertain):
+            dvar[-1].lower = variables.lower[j]
+            dvar[-1].upper = variables.upper[j]
+        elif isinstance(variables, normal_uncertain):
+            dvar[-1].stddev = variables.stddev[j]
+            dvar[-1].mean = variables.mean[j]
+else:
+    for j in range(npart):
+        for k in range(nt):
+            dvar.append(deepcopy(variables))
+
+            # text parsing in dakota requires literal "'identifier'" not just "identifier"
+            dvar[-1].descriptor = "'" + str(variables.descriptor) + '_' + str(j + 1) + '_' + str(k + 1) + "'"
+
+            if isinstance(variables, uniform_uncertain):
+                dvar[-1].lower = variables.lower[j][k]
+                dvar[-1].upper = variables.upper[j][k]
+            elif isinstance(variables, normal_uncertain):
+                dvar[-1].stddev = variables.stddev[j][k]
+                dvar[-1].mean = variables.mean[j][k]
 else:
     dvar.append(deepcopy(variables))
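The net effect on the Dakota input is a predictable naming scheme: one quoted descriptor per partition when nsteps is 1, and one per (partition, time step) pair otherwise. A small standalone sketch of just that expansion:

    def expand_descriptors(descriptor, npart, nt):
        """Return the quoted Dakota descriptors produced for a scaled variable (sketch)."""
        if nt == 1:
            return ["'%s_%d'" % (descriptor, j + 1) for j in range(npart)]
        return ["'%s_%d_%d'" % (descriptor, j + 1, k + 1) for j in range(npart) for k in range(nt)]

    print(expand_descriptors('scaled_FrictionCoefficient', 3, 1))
    # ["'scaled_FrictionCoefficient_1'", "'scaled_FrictionCoefficient_2'", "'scaled_FrictionCoefficient_3'"]
    print(expand_descriptors('scaled_FrictionCoefficient', 2, 2))
    # ["'scaled_FrictionCoefficient_1_1'", "'scaled_FrictionCoefficient_1_2'", "'scaled_FrictionCoefficient_2_1'", "'scaled_FrictionCoefficient_2_2'"]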
issm/trunk-jpl/test/NightlyRun/test412.py
r25010 → r25022

 #partitioning
-md = partitioner(md, 'package', 'linear', 'npart', md.mesh.numberofvertices) - 1
+npart = md.mesh.numberofvertices
+partition = partitioner(md, 'package', 'linear', 'npart', npart) - 1
 md.qmu.isdakota = 1
…
 md.qmu.variables.drag_coefficient = normal_uncertain.normal_uncertain(
     'descriptor', 'scaled_FrictionCoefficient',
-    'mean', np.ones(md.mesh.numberofvertices),
-    'stddev', .01 * np.ones(md.mesh.numberofvertices),
+    'mean', np.ones(npart),
+    'stddev', .01 * np.ones(npart),
     'partition', partition
 )
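For orientation, the 'scaled_' convention used by this test means Dakota samples one multiplier per partition (mean 1, stddev 0.01 here) that scales the nominal field over that partition. The snippet below is a simplified standalone illustration of that mapping, not ISSM code, and all numbers are placeholders.

    import numpy as np

    partition = np.array([0, 0, 1, 1, 2])          # 0-based partition id per vertex (placeholder)
    nominal = np.array([10., 10., 20., 20., 30.])  # nominal FrictionCoefficient per vertex (placeholder)
    multipliers = np.array([1.02, 0.97, 1.00])     # one sampled value per partition
    scaled_field = nominal * multipliers[partition]
    print(scaled_field)                            # [10.2 10.2 19.4 19.4 30. ]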