Changeset 23189 for issm/trunk/src/m/classes/qmu.py
- Timestamp: 08/28/18 09:45:51
- Location: issm/trunk
- Files: 3 edited
issm/trunk
- Property svn:mergeinfo changed:
  /issm/trunk-jpl merged: 22823-22871,22873-22887,22894-22903,22905-23090,23092-23185,23187
issm/trunk/src
- Property svn:mergeinfo changed
issm/trunk/src/m/classes/qmu.py
--- issm/trunk/src/m/classes/qmu.py (r21341)
+++ issm/trunk/src/m/classes/qmu.py (r23189)
@@ -1,3 +1,5 @@
 import numpy as np
+from MatlabFuncs import *
+from IssmConfig import *
 from project3d import project3d
 from collections import OrderedDict
@@ -5,4 +7,5 @@
 from checkfield import checkfield
 from WriteData import WriteData
+from helpers import *

 class qmu(object):
@@ -16,6 +19,6 @@
     def __init__(self): # {{{
         self.isdakota = 0
-        self.variables = OrderedDict()
-        self.responses = OrderedDict()
+        self.variables = OrderedStruct()
+        self.responses = OrderedStruct()
         self.method = OrderedDict()
         self.params = OrderedDict()
@@ -117,17 +120,30 @@
             return

-        if not md.qmu.params.evaluation_concurrency==1:
-            md.checkmessage("concurrency should be set to 1 when running dakota in library mode")
-        if md.qmu.partition:
-            if not np.size(md.qmu.partition)==md.mesh.numberofvertices:
-                md.checkmessage("user supplied partition for qmu analysis should have size md.mesh.numberofvertices x 1")
-            if not min(md.qmu.partition)==0:
+        version=IssmConfig('_DAKOTA_VERSION_')
+        version=float(version[0])
+
+        if version < 6:
+            if not md.qmu.params.evaluation_concurrency==1:
+                md.checkmessage("concurrency should be set to 1 when running dakota in library mode")
+        else:
+            if not strcmpi(self.params.evaluation_scheduling,'master'):
+                md.checkmessage('evaluation_scheduling in qmu.params should be set to "master"')
+
+            if md.cluster.np <= 1:
+                md.checkmessage('in parallel library mode, Dakota needs to run on at least 2 cpus, 1 cpu for the master, 1 cpu for the slave. Modify md.cluster.np accordingly.')
+
+            if self.params.processors_per_evaluation < 1:
+                md.checkmessage('in parallel library mode, Dakota needs to run at least one slave on one cpu (md.qmu.params.processors_per_evaluation >=1)!')
+
+            if np.mod(md.cluster.np-1,self.params.processors_per_evaluation):
+                md.checkmessage('in parallel library mode, the requirement is for md.cluster.np = md.qmu.params.processors_per_evaluation * number_of_slaves, where number_of_slaves will automatically be determined by Dakota. Modify md.cluster.np accordingly')
+
+        if np.size(md.qmu.partition) > 0:
+            if np.size(md.qmu.partition)!=md.mesh.numberofvertices and np.size(md.qmu.partition) != md.mesh.numberofelements:
+                md.checkmessage("user supplied partition for qmu analysis should have size (md.mesh.numberofvertices x 1) or (md.mesh.numberofelements x 1)")
+            if not min(md.qmu.partition.flatten())==0:
                 md.checkmessage("partition vector not indexed from 0 on")
-            if max(md.qmu.partition)>=md.qmu.numberofpartitions:
+            if max(md.qmu.partition.flatten())>=md.qmu.numberofpartitions:
                 md.checkmessage("for qmu analysis, partitioning vector cannot go over npart, number of partition areas")
-
-        if md.cluster.name!='none':
-            if not md.settings.waitonlock:
-                md.checkmessage("waitonlock should be activated when running qmu in parallel mode!")

         return md
@@ -143,5 +159,5 @@
         WriteData(fid,prefix,'object',self,'fieldname','variabledescriptors','format','StringArray')
         WriteData(fid,prefix,'object',self,'fieldname','responsedescriptors','format','StringArray')
-        if not self.mass_flux_segments:
+        if not isempty(self.mass_flux_segments):
             WriteData(fid,prefix,'data',self.mass_flux_segments,'name','md.qmu.mass_flux_segments','format','MatArray');
             flag=True;
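The __init__ change swaps the variables and responses containers from OrderedDict to OrderedStruct, pulled in by the new "from helpers import *". The class below is a hypothetical stand-in, not ISSM's helpers.OrderedStruct, sketching the behaviour such a structure plausibly provides: attribute-style access with fields kept in insertion order.

    # Hypothetical stand-in for helpers.OrderedStruct (NOT the ISSM class)
    from collections import OrderedDict

    class OrderedStruct(object):
        def __init__(self):
            object.__setattr__(self, '_fields', OrderedDict())

        def __setattr__(self, name, value):
            self._fields[name] = value

        def __getattr__(self, name):
            try:
                return self._fields[name]
            except KeyError:
                raise AttributeError(name)

        def __iter__(self):
            # yield (name, value) pairs in the order fields were assigned
            return iter(self._fields.items())

    variables = OrderedStruct()
    variables.drag_coefficient = 'normal uniform'   # illustrative values only
    variables.rheology_B = 'normal uniform'
    print([name for name, _ in variables])  # ['drag_coefficient', 'rheology_B']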
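The core of the change is in checkconsistency: the old unconditional serial-concurrency check is now gated on the installed Dakota version, and a master/slave parallel configuration is enforced for Dakota 6 and later. The following is a minimal self-contained sketch of that decision logic, with strcmpi reimplemented locally as a stand-in for the MatlabFuncs helper and plain arguments standing in for the md model object.

    import numpy as np

    def strcmpi(a, b):
        # local stand-in for MatlabFuncs.strcmpi: case-insensitive compare
        return a.lower() == b.lower()

    def check_dakota_params(version, evaluation_concurrency,
                            evaluation_scheduling, cluster_np,
                            processors_per_evaluation):
        # mirrors the version-gated checks in qmu.checkconsistency (r23189)
        errors = []
        if version < 6:
            # pre-6 Dakota in library mode only supports serial evaluation
            if evaluation_concurrency != 1:
                errors.append('concurrency should be set to 1 in library mode')
        else:
            # Dakota >= 6 runs one master plus slaves of equal cpu count
            if not strcmpi(evaluation_scheduling, 'master'):
                errors.append('evaluation_scheduling should be "master"')
            if cluster_np <= 1:
                errors.append('need at least 2 cpus: 1 master, 1 slave')
            if processors_per_evaluation < 1:
                errors.append('each evaluation needs at least 1 cpu')
            if np.mod(cluster_np - 1, processors_per_evaluation):
                errors.append('np - 1 must be a multiple of processors_per_evaluation')
        return errors

    print(check_dakota_params(6.2, 4, 'master', 6, 2))
    # ['np - 1 must be a multiple of processors_per_evaluation']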
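The partition check is also relaxed: a user-supplied partition vector may now be sized by vertices or by elements, and it is flattened before the bounds tests so that 2-D column vectors pass. A small sketch under the same stand-in convention, with made-up mesh sizes:

    import numpy as np

    def check_partition(partition, numberofvertices, numberofelements, npart):
        # mirrors the relaxed checks: vertex- OR element-sized vectors allowed
        errors = []
        if np.size(partition) > 0:
            if np.size(partition) not in (numberofvertices, numberofelements):
                errors.append('partition must have size (vertices x 1) or (elements x 1)')
            p = np.asarray(partition).flatten()
            if p.min() != 0:
                errors.append('partition vector not indexed from 0 on')
            if p.max() >= npart:
                errors.append('partition ids cannot reach npart')
        return errors

    # a 2-D column vector on 5 vertices with 3 partitions passes every test
    print(check_partition(np.array([[0], [1], [2], [0], [1]]), 5, 4, 3))  # []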
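Finally, note that replacing "if not self.mass_flux_segments:" with "if not isempty(self.mass_flux_segments):" inverts the test: the old truthiness check fired precisely when the list was empty, so segments were marshalled only when there were none to write. A short demonstration, with isempty defined locally as a stand-in for the MatlabFuncs helper:

    import numpy as np

    def isempty(x):
        # local stand-in for the MatlabFuncs helper: true when x has no entries
        return np.size(x) == 0

    mass_flux_segments = []                 # nothing to marshall
    print(not mass_flux_segments)           # True:  old test would have written
    print(not isempty(mass_flux_segments))  # False: new test correctly skips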