source: issm/oecreview/Archive/24684-25833/ISSM-25010-25011.diff

Last change on this file was 25834, checked in by Mathieu Morlighem, 4 years ago

CHG: added 24684-25833

File size: 24.2 KB
RevLine 
[25834]1Index: ../trunk-jpl/src/m/modules/Scotch.py
2===================================================================
3--- ../trunk-jpl/src/m/modules/Scotch.py (revision 25010)
4+++ ../trunk-jpl/src/m/modules/Scotch.py (revision 25011)
5@@ -1,7 +1,7 @@
6 from Scotch_python import Scotch_python
7
8
9-def Scotch(* varargin):
10+def Scotch(*varargin):
11 '''SCOTCH - Scotch partitioner
12
13 Usage:
14@@ -8,6 +8,6 @@
15 maptab = Scotch(adjmat, vertlb, vertwt, edgewt, archtyp, archpar, Scotch - specific parameters)
16 '''
17 # Call mex module
18- maptab = Scotch_python(* varargin)
19+ maptab = Scotch_python(*varargin)
20
21 return maptab
22Index: ../trunk-jpl/src/m/boundaryconditions/love_numbers.py
23===================================================================
24--- ../trunk-jpl/src/m/boundaryconditions/love_numbers.py (revision 25010)
25+++ ../trunk-jpl/src/m/boundaryconditions/love_numbers.py (revision 25011)
26@@ -1,7 +1,7 @@
27 import numpy as np
28
29
30-def love_numbers(value, * varargin):
31+def love_numbers(value, *varargin):
32 '''LOVE_NUMBERS: provide love numbers (value 'h', 'k', 'l', 'gamma' and 'lambda'
33 retrieved from: http://www.srosat.com/iag-jsg/loveNb.php
34 Usage: series = love_numbers(value)
35Index: ../trunk-jpl/src/m/plot/export_gl.py
36===================================================================
37--- ../trunk-jpl/src/m/plot/export_gl.py (revision 25010)
38+++ ../trunk-jpl/src/m/plot/export_gl.py (revision 25011)
39@@ -6,7 +6,7 @@
40 from writejsfile import writejsfile
41
42
43-def export_gl(md, * varargin):
44+def export_gl(md, *varargin):
45 class ResultObj(object):
46 def __getattr__(self, attr):
47 return self.__dict__.get(attr)
48Index: ../trunk-jpl/src/m/classes/qmu/response_function.py
49===================================================================
50--- ../trunk-jpl/src/m/classes/qmu/response_function.py (revision 25010)
51+++ ../trunk-jpl/src/m/classes/qmu/response_function.py (revision 25011)
52@@ -1,7 +1,7 @@
53 import numpy as np
54
55+from fielddisplay import fielddisplay
56 from MatlabFuncs import *
57-from fielddisplay import fielddisplay
58 from pairoptions import pairoptions
59 from partition_npart import *
60 from rlev_write import *
61@@ -102,17 +102,17 @@
62
63 return [rf] # Always return a list, so we have something akin to a MATLAB single row matrix
64
65- def __repr__(self): #{{{
66+ def __repr__(rf): #{{{
67 # display the object
68 string = 'class "response_function" object = \n'
69- string = "%s\n%s" % (string, fielddisplay(self, 'descriptor', 'name tag'))
70- string = "%s\n%s" % (string, fielddisplay(self, 'respl', 'response levels'))
71- string = "%s\n%s" % (string, fielddisplay(self, 'probl', 'probability levels'))
72- string = "%s\n%s" % (string, fielddisplay(self, 'rell', 'reliability levels'))
73- string = "%s\n%s" % (string, fielddisplay(self, 'grell', 'general reliability levels'))
74+ string = "%s\n%s" % (string, fielddisplay(rf, 'descriptor', 'name tag'))
75+ string = "%s\n%s" % (string, fielddisplay(rf, 'respl', 'response levels'))
76+ string = "%s\n%s" % (string, fielddisplay(rf, 'probl', 'probability levels'))
77+ string = "%s\n%s" % (string, fielddisplay(rf, 'rell', 'reliability levels'))
78+ string = "%s\n%s" % (string, fielddisplay(rf, 'grell', 'general reliability levels'))
79
80- if self.partition != []:
81- string = "%s\n%s" % (string, fielddisplay(self, 'partition', 'partition'))
82+ if rf.partition != []:
83+ string = "%s\n%s" % (string, fielddisplay(rf, 'partition', 'partition vector defining where the response will be computed'))
84
85 return string
86 #}}}
87Index: ../trunk-jpl/src/m/coordsystems/gmtmask.py
88===================================================================
89--- ../trunk-jpl/src/m/coordsystems/gmtmask.py (revision 25010)
90+++ ../trunk-jpl/src/m/coordsystems/gmtmask.py (revision 25011)
91@@ -5,7 +5,7 @@
92 import subprocess
93
94
95-def gmtmask(lat, long, * varargin):
96+def gmtmask(lat, long, *varargin):
97 '''GMTMASK - figure out which lat, long points are on the ocean
98
99 Usage:
100Index: ../trunk-jpl/src/m/qmu/qmupart2npart.py
101===================================================================
102--- ../trunk-jpl/src/m/qmu/qmupart2npart.py (nonexistent)
103+++ ../trunk-jpl/src/m/qmu/qmupart2npart.py (revision 25011)
104@@ -0,0 +1,10 @@
105+import numpy as np
106+
107+
108+def qmupart2npart(vector):
109+ # Vector is full of -1 (no partition) and 0 to npart. We need to identify
110+ # npart.
111+
112+ npart = vector.max() + 1
113+
114+ return npart
115Index: ../trunk-jpl/src/m/partition/partitioner.py
116===================================================================
117--- ../trunk-jpl/src/m/partition/partitioner.py (revision 25010)
118+++ ../trunk-jpl/src/m/partition/partitioner.py (revision 25011)
119@@ -8,26 +8,26 @@
120 from mesh2d import *
121
122
123-def partitioner(md, * varargin):
124- help = '''
125-PARTITIONER - partition mesh
126+def partitioner(md, *varargin):
127+ '''
128+ PARTITIONER - partition mesh
129
130- List of options to partitioner:
131+ List of options to partitioner:
132
133- package: 'chaco', 'metis'
134- npart: number of partitions.
135- weighting: 'on' or 'off': default off
136- section: 1 by defaults(1 = bisection, 2 = quadrisection, 3 = octasection)
137- recomputeadjacency: 'on' by default (set to 'off' to compute existing one)
138- type: 'node' or 'element' partition vector (default to 'node')
139- Output: md.qmu.partition recover the partition vector
140+ package: 'chaco', 'metis'
141+ npart: number of partitions.
142+ weighting: 'on' or 'off': default off
143+ section: 1 by defaults(1 = bisection, 2 = quadrisection, 3 = octasection)
144+ recomputeadjacency: 'on' by default (set to 'off' to compute existing one)
145+ type: 'node' or 'element' partition vector (default to 'node')
146+ Output: partitionvector: the partition vector
147
148- Usage:
149- md = partitioner(md, 'package', 'chaco', 'npart', 100, 'weighting', 'on')
150+ Usage:
151+ partitionvector = partitioner(md, 'package', 'chaco', 'npart', 100, 'weighting', 'on')
152 '''
153
154 #get options:
155- options = pairoptions(* varargin)
156+ options = pairoptions(*varargin)
157
158 #get options:
159 section = options.getfieldvalue('section', 1)
160@@ -121,12 +121,5 @@
161
162 part = part.reshape(-1, 1)
163
164- if vectortype == 'element':
165- md.qmu.epartition = part
166- if np.size(md.qmu.vpartition) == 0 or (np.size(md.qmu.vpartition) == 1 and np.isnan(md.qmu.vpartition)):
167- md.qmu.vpartition = np.zeros((md.mesh.numberofvertices, 1))
168- else:
169- md.qmu.vpartition = part
170- if np.size(md.qmu.epartition) == 0 or (np.size(md.qmu.epartition) == 1 and np.isnan(md.qmu.epartition)):
171- md.qmu.epartition = np.zeros((md.mesh.numberofelements, 1))
172- return md
173+ # Output
174+ return part
175Index: ../trunk-jpl/src/m/classes/slr.py
176===================================================================
177--- ../trunk-jpl/src/m/classes/slr.py (revision 25010)
178+++ ../trunk-jpl/src/m/classes/slr.py (revision 25011)
179@@ -1,8 +1,9 @@
180+import numpy as np
181+
182+from checkfield import checkfield
183 from fielddisplay import fielddisplay
184 from MatlabFuncs import *
185 from model import *
186-import numpy as np
187-from checkfield import checkfield
188 from WriteData import WriteData
189
190
191@@ -38,10 +39,10 @@
192 self.geodetic_run_frequency = 1 #how many time steps we skip before we run the geodetic part of the solver during transient
193 self.geodetic = 0 #compute geodetic SLR? (in addition to steric?)
194 self.degacc = 0
195- self.loop_increment = 0
196 self.horiz = 0
197 self.Ngia = float('NaN')
198 self.Ugia = float('NaN')
199+ self.planetradius = planetradius('earth')
200 self.requested_outputs = []
201 self.transitions = []
202
203@@ -71,7 +72,6 @@
204 string = "%s\n%s" % (string, fielddisplay(self, 'hydro_rate', 'rate of hydrological expansion [mm / yr]'))
205 string = "%s\n%s" % (string, fielddisplay(self, 'Ngia', 'rate of viscous (GIA) geoid expansion (in mm / yr)'))
206 string = "%s\n%s" % (string, fielddisplay(self, 'Ugia', 'rate of viscous (GIA) bedrock uplift (in mm / yr)'))
207- string = "%s\n%s" % (string, fielddisplay(self, 'loop_increment', 'vector assembly (in the convolution) framentation'))
208 string = "%s\n%s" % (string, fielddisplay(self, 'geodetic', 'compute geodetic SLR? (in addition to steric?) default 0'))
209 string = "%s\n%s" % (string, fielddisplay(self, 'geodetic_run_frequency', 'how many time steps we skip before we run SLR solver during transient (default: 1)'))
210 string = "%s\n%s" % (string, fielddisplay(self, 'rigid', 'rigid earth graviational potential perturbation'))
211@@ -90,7 +90,6 @@
212 self.abstol = float('NaN') #1 mm of sea level rise
213 #maximum of non - linear iterations.
214 self.maxiter = 5
215- self.loop_increment = 200
216 #computational flags:
217 self.geodetic = 0
218 self.rigid = 1
219@@ -120,6 +119,8 @@
220 self.transitions = []
221 #horizontal displacement? (not by default)
222 self.horiz = 0
223+ #earth area
224+ self.planetradius = planetradius('earth')
225
226 return self
227 #}}}
228@@ -149,7 +150,6 @@
229 md = checkfield(md, 'fieldname', 'slr.hydro_rate', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
230 md = checkfield(md, 'fieldname', 'slr.degacc', 'size', [1, 1], '>=', 1e-10)
231 md = checkfield(md, 'fieldname', 'slr.requested_outputs', 'stringrow', 1)
232- md = checkfield(md, 'fieldname', 'slr.loop_increment', 'NaN', 1, 'Inf', 1, '>=', 1)
233 md = checkfield(md, 'fieldname', 'slr.horiz', 'NaN', 1, 'Inf', 1, 'values', [0, 1])
234 md = checkfield(md, 'fieldname', 'slr.Ngia', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
235 md = checkfield(md, 'fieldname', 'slr.Ugia', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
236@@ -203,9 +203,9 @@
237 WriteData(fid, prefix, 'object', self, 'fieldname', 'Ugia', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1e-3 / md.constants.yts)
238 WriteData(fid, prefix, 'object', self, 'fieldname', 'degacc', 'format', 'Double')
239 WriteData(fid, prefix, 'object', self, 'fieldname', 'transitions', 'format', 'MatArray')
240- WriteData(fid, prefix, 'object', self, 'fieldname', 'loop_increment', 'format', 'Integer')
241 WriteData(fid, prefix, 'object', self, 'fieldname', 'horiz', 'format', 'Integer')
242 WriteData(fid, prefix, 'object', self, 'fieldname', 'geodetic', 'format', 'Integer')
243+ WriteData(fid, prefix, 'object', self, 'fieldname', 'planetradius', 'format', 'Double')
244
245 #process requested outputs
246 outputs = self.requested_outputs
247Index: ../trunk-jpl/src/m/classes/qmu.py
248===================================================================
249--- ../trunk-jpl/src/m/classes/qmu.py (revision 25010)
250+++ ../trunk-jpl/src/m/classes/qmu.py (revision 25011)
251@@ -1,13 +1,13 @@
252 import numpy as np
253-from MatlabFuncs import *
254-from IssmConfig import *
255-from project3d import project3d
256+
257+from checkfield import checkfield
258 from collections import OrderedDict
259+from dakota_method import *
260 from fielddisplay import fielddisplay
261-from checkfield import checkfield
262+from helpers import *
263+from IssmConfig import *
264+from MatlabFuncs import *
265 from WriteData import WriteData
266-from helpers import *
267-from dakota_method import *
268
269
270 class qmu(object):
271@@ -26,10 +26,13 @@
272 self.method = OrderedDict()
273 self.params = OrderedStruct()
274 self.results = OrderedDict()
275- self.numberofpartitions = 0
276 self.numberofresponses = 0
277 self.variabledescriptors = []
278+ self.variablepartitions = []
279+ self.variablepartitions_npart = []
280 self.responsedescriptors = []
281+ self.responsepartitions = []
282+ self.responsepartitions_npart = []
283 self.mass_flux_profile_directory = float('NaN')
284 self.mass_flux_profiles = float('NaN')
285 self.mass_flux_segments = []
286@@ -108,9 +111,8 @@
287 size = np.shape(getattr(result, fname))
288 s += " %-*s: [%ix%i] '%s'\n" % (maxlen + 1, fname, a, b, type(getattr(result, fname)))
289
290- s += "%s\n" % fielddisplay(self, 'vpartition', 'user provided mesh partitioning (vertex based)')
291- s += "%s\n" % fielddisplay(self, 'epartition', 'user provided mesh partitioning (element based)')
292- s += "%s\n" % fielddisplay(self, 'numberofpartitions', 'number of partitions for semi - discrete qmu')
293+ s += "%s\n" % fielddisplay(self, 'variablepartitions', '')
294+ s += "%s\n" % fielddisplay(self, 'variablepartitions_npart', '')
295 s += "%s\n" % fielddisplay(self, 'variabledescriptors', '')
296 s += "%s\n" % fielddisplay(self, 'responsedescriptors', '')
297 s += "%s\n" % fielddisplay(self, 'method', 'array of dakota_method class')
298@@ -123,8 +125,6 @@
299 return s
300 # }}}
301 def extrude(self, md): # {{{
302- self.vpartition = project3d(md, 'vector', np.transpose(self.vpartition), 'type', 'node')
303- self.epartition = project3d(md, 'vector', np.transpose(self.epartition), 'type', 'element')
304 return self
305 #}}}
306 def setdefaultparameters(self): # {{{
307@@ -154,25 +154,11 @@
308 if np.mod(md.cluster.np - 1, self.params.processors_per_evaluation):
309 md.checkmessage('in parallel library mode, the requirement is for md.cluster.np = md.qmu.params.processors_per_evaluation * number_of_slaves, where number_of_slaves will automatically be determined by Dakota. Modify md.cluster.np accordingly')
310
311- if np.size(md.qmu.vpartition) > 0:
312- if np.size(md.qmu.vpartition, 0) != md.mesh.numberofvertices:
313- md.checkmessage("user supplied vertex partition for qmu analysis should have size (md.mesh.numberofvertices x 1)")
314- if not min(md.qmu.vpartition.flatten()) == 0:
315- md.checkmessage("vertex partition vector not indexed from 0 on")
316- if max(md.qmu.vpartition.flatten()) >= md.qmu.numberofpartitions:
317- md.checkmessage("for qmu analysis, vertex partitioning vector cannot go over npart, number of partition areas")
318+ # Go through variables and check for consistency
319+ fv = fieldnames(self.variables)
320+ for i in range(len(fv)):
321+ getattr(self.variables, fv[i]).checkconsistency(md, solution, analyses)
322
323- if np.size(md.qmu.epartition) > 0:
324- if np.size(md.qmu.epartition, 0) != md.mesh.numberofelements:
325- md.checkmessage("user supplied element partition for qmu analysis should have size (md.mesh.numberofelements x 1)")
326- if not min(md.qmu.epartition.flatten()) == 0:
327- md.checkmessage("elememtn partition vector not indexed from 0 on")
328- if max(md.qmu.epartition.flatten()) >= md.qmu.numberofpartitions:
329- md.checkmessage("for qmu analysis, element partitioning vector cannot go over npart, number of partition areas")
330-
331- if np.size(md.qmu.vpartition) == 0 or np.any(np.isnan(md.qmu.vpartition)) or np.size(md.qmu.epartition) == 0 or np.any(np.isnan(md.qmu.epartition)):
332- md.checkmessage("for qmu analysis, both an element and partitioning vectors need to be supplied with no nan values! One can be defaulted to all zeros.")
333-
334 return md
335 # }}}
336 def marshall(self, prefix, md, fid): # {{{
337@@ -181,10 +167,13 @@
338 if not self.isdakota:
339 WriteData(fid, prefix, 'data', False, 'name', 'md.qmu.mass_flux_segments_present', 'format', 'Boolean')
340 return
341- WriteData(fid, prefix, 'object', self, 'fieldname', 'numberofpartitions', 'format', 'Integer')
342 WriteData(fid, prefix, 'object', self, 'fieldname', 'numberofresponses', 'format', 'Integer')
343 WriteData(fid, prefix, 'object', self, 'fieldname', 'variabledescriptors', 'format', 'StringArray')
344+ WriteData(fid, prefix, 'object', self, 'fieldname', 'variablepartitions', 'format', 'MatArray')
345+ WriteData(fid, prefix, 'object', self, 'fieldname', 'variablepartitions_npart', 'format', 'IntMat', 'mattype', 3)
346 WriteData(fid, prefix, 'object', self, 'fieldname', 'responsedescriptors', 'format', 'StringArray')
347+ WriteData(fid, prefix, 'object', self, 'fieldname', 'responsepartitions', 'format', 'MatArray')
348+ WriteData(fid, prefix, 'object', self, 'fieldname', 'responsepartitions_npart', 'format', 'IntMat', 'mattype', 3)
349 if not isempty(self.mass_flux_segments):
350 WriteData(fid, prefix, 'data', self.mass_flux_segments, 'name', 'md.qmu.mass_flux_segments', 'format', 'MatArray')
351 flag = True
352Index: ../trunk-jpl/src/m/qmu/setupdesign/QmuSetupVariables.py
353===================================================================
354--- ../trunk-jpl/src/m/qmu/setupdesign/QmuSetupVariables.py (revision 25010)
355+++ ../trunk-jpl/src/m/qmu/setupdesign/QmuSetupVariables.py (revision 25011)
356@@ -1,7 +1,8 @@
357-from MatlabFuncs import *
358 from copy import deepcopy
359 from helpers import *
360+from MatlabFuncs import *
361 from normal_uncertain import *
362+from qmupart2npart import *
363 from uniform_uncertain import *
364
365
366@@ -16,17 +17,25 @@
367
368 #ok, key off according to type of descriptor:
369 if strncmp(descriptor, 'scaled_', 7):
370- #we have a scaled variable, expand it over the partition.
371+ #we have a scaled variable, expand it over the partition. First recover the partition.
372+ partition = variables.partition
373+ #figure out number of partitions
374+ npart=qmupart2npart(partition)
375+
376 if isinstance(variables, uniform_uncertain):
377- if ((type(variables.lower) in [list, np.ndarray] and len(variables.lower) > md.qmu.numberofpartitions) or (type(variables.upper) in [list, np.ndarray] and len(variables.upper) > md.qmu.numberofpartitions)):
378- raise RuntimeError('QmuSetupDesign error message: upper and lower should be either a scalar or a "npart" length vector')
379+ nlower=len(variables.lower)
380+ nupper=len(variables.upper)
381+ if nlower != npart or nupper != npart:
382+ raise RuntimeError('QmuSetupVariables error message: upper and lower fields should be same size as the number of partitions')
383 elif isinstance(variables, normal_uncertain):
384- if type(variables.stddev) in [list, np.ndarray] and len(variables.stddev) > md.qmu.numberofpartitions:
385- raise RuntimeError('QmuSetupDesign error message: stddev should be either a scalar or a "npart" length vector')
386+ nstddev=len(variables.stddev)
387+ nmean=len(variables.mean)
388+ if nstddev != npart or nmean != npart:
389+ raise RuntimeError('QmuSetupVariables error message: stddev and mean fields should be same size as the number of partitions')
390
391 #ok, dealing with semi-discrete distributed variable. Distribute according to how many
392 #partitions we want
393- for j in range(md.qmu.numberofpartitions):
394+ for j in range(npart):
395 dvar.append(deepcopy(variables))
396
397 # text parsing in dakota requires literal "'identifier'" not just "identifier"
398@@ -33,23 +42,11 @@
399 dvar[-1].descriptor = "'" + str(variables.descriptor) + '_' + str(j + 1) + "'"
400
401 if isinstance(variables, uniform_uncertain):
402- if type(variables.lower) in [list, np.ndarray]:
403- dvar[-1].lower = variables.lower[j]
404- else:
405- dvar[-1].lower = variables.lower
406- if type(variables.upper) in [list, np.ndarray]:
407- dvar[-1].upper = variables.upper[j]
408- else:
409- dvar[-1].upper = variables.upper
410+ dvar[-1].lower = variables.lower[j]
411+ dvar[-1].upper = variables.upper[j]
412 elif isinstance(variables, normal_uncertain):
413- if type(variables.stddev) in [list, np.ndarray]:
414- dvar[-1].stddev = variables.stddev[j]
415- else:
416- dvar[-1].stddev = variables.stddev
417- if type(variables.mean) in [list, np.ndarray]:
418- dvar[-1].mean = variables.mean[j]
419- else:
420- dvar[-1].mean = variables.mean
421+ dvar[-1].stddev = variables.stddev[j]
422+ dvar[-1].mean = variables.mean[j]
423 else:
424 dvar.append(deepcopy(variables))
425
426Index: ../trunk-jpl/src/m/qmu/setupdesign/QmuSetupResponses.py
427===================================================================
428--- ../trunk-jpl/src/m/qmu/setupdesign/QmuSetupResponses.py (revision 25010)
429+++ ../trunk-jpl/src/m/qmu/setupdesign/QmuSetupResponses.py (revision 25011)
430@@ -1,5 +1,6 @@
431+from copy import deepcopy
432 from MatlabFuncs import *
433-from copy import deepcopy
434+from qmupart2npart import *
435
436
437 def QmuSetupResponses(md, dresp, responses):
438@@ -7,15 +8,18 @@
439 #get descriptor
440 descriptor = responses.descriptor
441
442- #decide whether this is a distributed response, which will drive whether we expand it into npart values,
443- #or if we just carry it forward as is.
444+ # Decide whether this is a distributed response, which will drive whether
445+ # we expand it into npart values, or if we just carry it forward as is.
446
447 #ok, key off according to type of descriptor:
448 if strncmp(descriptor, 'scaled_', 7):
449 #we have a scaled response, expand it over the partition.
450- #ok, dealing with semi - discrete distributed response. Distribute according to how many
451- #partitions we want
452- for j in range(md.qmu.numberofpartitions):
453+
454+ # Ok, dealing with semi-discrete distributed response. Distribute
455+ # according to how many partitions we want.
456+ npart = qmupart2npart(responses.partition)
457+
458+ for j in range(npart):
459 dresp.append(deepcopy(responses))
460 dresp[-1].descriptor = str(responses.descriptor) + '_' + str(j + 1)
461 else:
462Index: ../trunk-jpl/src/m/qmu/preqmu.py
463===================================================================
464--- ../trunk-jpl/src/m/qmu/preqmu.py (revision 25010)
465+++ ../trunk-jpl/src/m/qmu/preqmu.py (revision 25011)
466@@ -1,10 +1,12 @@
467 import os
468-from MatlabFuncs import *
469+
470+from dakota_in_data import *
471+from expandresponses import *
472 from expandvariables import *
473-from expandresponses import *
474 from helpers import *
475-from dakota_in_data import *
476+from MatlabFuncs import *
477 from process_qmu_response_data import *
478+from qmupart2npart import *
479
480
481 def preqmu(md, options):
482@@ -105,10 +107,42 @@
483 responsedescriptors.append(fieldresponses[j].descriptor)
484 #}}}
485
486+ # Build a list of variable partitions
487+ variablepartitions = []
488+ variablepartitions_npart = []
489+ variable_fieldnames = fieldnames(md.qmu.variables)
490+ for i in range(len(variable_fieldnames)):
491+ field_name = variable_fieldnames[i]
492+ fieldvariable = getattr(md.qmu.variables, field_name)
493+ if fieldvariable.isscaled():
494+ variablepartitions.append(fieldvariable.partition)
495+ variablepartitions_npart.append(qmupart2npart(fieldvariable.partition))
496+ else:
497+ variablepartitions.append([])
498+ variablepartitions_npart.append(0)
499+
500+ # Build a list of response partitions
501+ responsepartitions = []
502+ responsepartitions_npart = []
503+ response_fieldnames = fieldnames(md.qmu.responses)
504+ for i in range(len(response_fieldnames)):
505+ field_name = response_fieldnames[i]
506+ fieldresponse = getattr(md.qmu.responses, field_name)
507+ if fieldresponse.isscaled():
508+ responsepartitions.append(fieldresponse.partition)
509+ responsepartitions_npart.append(qmupart2npart(fieldresponse.partition))
510+ else:
511+ responsepartitions.append([])
512+ responsepartitions_npart.append(0)
513+
514 # register the fields that will be needed by the Qmu model.
515 md.qmu.numberofresponses = numresponses
516 md.qmu.variabledescriptors = variabledescriptors
517+ md.qmu.variablepartitions = variablepartitions
518+ md.qmu.variablepartitions_npart = variablepartitions_npart
519 md.qmu.responsedescriptors = responsedescriptors
520+ md.qmu.responsepartitions = responsepartitions
521+ md.qmu.responsepartitions_npart = responsepartitions_npart
522
523 # now, we have to provide all the info necessary for the solutions to compute the
524 # responses. For ex, if mass_flux is a response, we need a profile of points.
Note: See TracBrowser for help on using the repository browser.