Changeset 23095
- Timestamp: 08/16/18 15:41:58 (7 years ago)
- Location: issm/trunk-jpl/src/m
- Files: 44 added, 17 edited
Legend:
- Unmodified lines are shown unprefixed
- Added lines are prefixed with +
- Removed lines are prefixed with -
issm/trunk-jpl/src/m/classes/balancethickness.py
r22122 → r23095
      self.omega = float('NaN')
+     self.slopex = float('NaN')
+     self.slopey = float('NaN')

      #set defaults
  …
      WriteData(fid,prefix,'object',self,'fieldname','thickening_rate','format','DoubleMat','mattype',1,'scale',1./yts)
      WriteData(fid,prefix,'object',self,'fieldname','stabilization','format','Integer')
-     WriteData(fid,prefix,'object',self,'fieldname','omega','format','DoubleMat','mattype',1);
+     WriteData(fid,prefix,'object',self,'fieldname','slopex','format','DoubleMat','mattype',1)
+     WriteData(fid,prefix,'object',self,'fieldname','slopey','format','DoubleMat','mattype',1)
+     WriteData(fid,prefix,'object',self,'fieldname','omega','format','DoubleMat','mattype',1)
      # }}}
issm/trunk-jpl/src/m/classes/clusters/generic.py
r21576 → r23095
      executable='issm.exe';
      if isdakota:
-         version=IssmConfig('_DAKOTA_VERSION_')[0:2]
-         version=float(version)
+         version=IssmConfig('_DAKOTA_VERSION_')
+         version=float(version[0])
          if version>=6:
              executable='issm_dakota.exe'
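The fix above works because IssmConfig returns its value wrapped in a one-element container, so the old code sliced the container rather than the version string and the subsequent float() conversion failed. A minimal sketch of the corrected unwrap-then-compare logic, assuming a one-element tuple return value (the helper name dakota_executable is illustrative and not part of the changeset):

    def dakota_executable(version_tuple):
        # unwrap the first element, then convert to a number
        version = float(version_tuple[0])
        return 'issm_dakota.exe' if version >= 6 else 'issm.exe'

    print(dakota_executable((6.2,)))   # -> issm_dakota.exe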
issm/trunk-jpl/src/m/classes/fourierlove.py
r22383 → r23095

      def __init__(self): # {{{
-         self.nfreq = float('NaN');
-         self.frequencies = float('NaN');
-         self.sh_nmax = float('NaN');
-         self.sh_nmin = float('NaN');
-         self.g0 = float('NaN');
-         self.r0 = float('NaN');
-         self.mu0 = float('NaN');
-         self.allow_layer_deletion = float('NaN');
-         self.love_kernels = float('NaN');
-         self.forcing_type = float('NaN');
+         self.nfreq = 0;
+         self.frequencies = 0;
+         self.sh_nmax = 0;
+         self.sh_nmin = 0;
+         self.g0 = 0;
+         self.r0 = 0;
+         self.mu0 = 0;
+         self.allow_layer_deletion = 0;
+         self.love_kernels = 0;
+         self.forcing_type = 0;

          #set defaults
issm/trunk-jpl/src/m/classes/inversion.py
r21303 → r23095
      WriteData(fid,prefix,'object',self,'fieldname','iscontrol','format','Boolean')
      WriteData(fid,prefix,'object',self,'fieldname','incomplete_adjoint','format','Boolean')
+     WriteData(fid,prefix,'object',self,'fieldname','vel_obs','format','DoubleMat','mattype',1,'scale',1./yts)
      if not self.iscontrol:
          return
issm/trunk-jpl/src/m/classes/model.py
r22955 → r23095
          return self
      # }}}
-     def extract(md,area): # {{{
+     #@staticmethod
+     def extract(self,area): # {{{
          """
          extract - extract a model according to an Argus contour or flag list
  …
          #copy model
-         md1=copy.deepcopy(md)
+         md1=copy.deepcopy(self)

          #get elements that are inside area
  …
          #Edges
-         if md.mesh.domaintype()=='2Dhorizontal':
+         if md1.mesh.domaintype()=='2Dhorizontal':
              if np.ndim(md2.mesh.edges)>1 and np.size(md2.mesh.edges,axis=1)>1: #do not use ~isnan because there are some np.nans...
                  #renumber first two columns
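With extract now taking self, it is called as a bound method; the argument is still an Argus contour or a flag list, as the docstring says. A hedged usage sketch (md is an existing model instance; the contour file name and mask field below are illustrative, not taken from the changeset):

    md2 = md.extract('DomainOutline.exp')         # Argus contour file
    md3 = md.extract(md.mask.ice_levelset < 0)    # flag list over the mesh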
issm/trunk-jpl/src/m/classes/qmu.py
r22840 → r23095
  import numpy as np
+ from MatlabFuncs import *
+ from IssmConfig import *
  from project3d import project3d
  from collections import OrderedDict
  …
  from checkfield import checkfield
  from WriteData import WriteData
+ from helpers import *

  class qmu(object):
  …
      def __init__(self): # {{{
          self.isdakota = 0
-         self.variables = OrderedDict()
-         self.responses = OrderedDict()
+         self.variables = OrderedStruct()
+         self.responses = OrderedStruct()
          self.method = OrderedDict()
          self.params = OrderedDict()
  …
              return

-         if not md.qmu.params.evaluation_concurrency==1:
-             md.checkmessage("concurrency should be set to 1 when running dakota in library mode")
-         if md.qmu.partition:
-             if not np.size(md.qmu.partition)==md.mesh.numberofvertices:
-                 md.checkmessage("user supplied partition for qmu analysis should have size md.mesh.numberofvertices x 1")
-             if not min(md.qmu.partition)==0:
+         version=IssmConfig('_DAKOTA_VERSION_')
+         version=float(version[0])
+
+         if version < 6:
+             if not md.qmu.params.evaluation_concurrency==1:
+                 md.checkmessage("concurrency should be set to 1 when running dakota in library mode")
+         else:
+             if not strcmpi(self.params.evaluation_scheduling,'master'):
+                 md.checkmessage('evaluation_scheduling in qmu.params should be set to "master"')
+
+             if md.cluster.np <= 1:
+                 md.checkmessage('in parallel library mode, Dakota needs to run on at least 2 cpus, 1 cpu for the master, 1 cpu for the slave. Modify md.cluser.np accordingly.')
+
+             if self.params.processors_per_evaluation < 1:
+                 md.checkmessage('in parallel library mode, Dakota needs to run at least one slave on one cpu (md.qmu.params.processors_per_evaluation >=1)!')
+
+             if np.mod(md.cluster.np-1,self.params.processors_per_evaluation):
+                 md.checkmessage('in parallel library mode, the requirement is for md.cluster.np = md.qmu.params.processors_per_evaluation * number_of_slaves, where number_of_slaves will automatically be determined by Dakota. Modify md.cluster.np accordingly')
+
+         if np.size(md.qmu.partition) > 0:
+             if np.size(md.qmu.partition)!=md.mesh.numberofvertices and np.size(md.qmu.partition) != md.mesh.numberofelements:
+                 md.checkmessage("user supplied partition for qmu analysis should have size (md.mesh.numberofvertices x 1) or (md.mesh.numberofelements x 1)")
+             if not min(md.qmu.partition.flatten())==0:
                  md.checkmessage("partition vector not indexed from 0 on")
-             if max(md.qmu.partition)>=md.qmu.numberofpartitions:
+             if max(md.qmu.partition.flatten())>=md.qmu.numberofpartitions:
                  md.checkmessage("for qmu analysis, partitioning vector cannot go over npart, number of partition areas")
  …
          WriteData(fid,prefix,'object',self,'fieldname','variabledescriptors','format','StringArray')
          WriteData(fid,prefix,'object',self,'fieldname','responsedescriptors','format','StringArray')
-         if not self.mass_flux_segments:
+         if not isempty(self.mass_flux_segments):
              WriteData(fid,prefix,'data',self.mass_flux_segments,'name','md.qmu.mass_flux_segments','format','MatArray');
              flag=True;
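For Dakota 6 and later in parallel library mode, the new checks amount to a simple CPU budget: one master plus an integer number of slaves of processors_per_evaluation CPUs each. A minimal sketch of that arithmetic; the function and argument names are illustrative and not part of the class:

    import numpy as np

    def qmu_cpu_layout_ok(cluster_np, processors_per_evaluation):
        # md.cluster.np = 1 master + processors_per_evaluation * number_of_slaves
        if cluster_np <= 1 or processors_per_evaluation < 1:
            return False
        return np.mod(cluster_np - 1, processors_per_evaluation) == 0

    print(qmu_cpu_layout_ok(9, 4))   # True:  1 master + 2 slaves of 4 cpus each
    print(qmu_cpu_layout_ok(8, 4))   # False: the 7 remaining cpus are not a multiple of 4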
issm/trunk-jpl/src/m/dev/devpath.py
r22293 → r23095

  #Manual imports for commonly used functions
+ from runme import runme #first because plotmodel may fail
  from plotmodel import plotmodel
- from runme import runme

  #c = get_ipython().config
issm/trunk-jpl/src/m/miscellaneous/normfit_issm.m
r15106 → r23095
      end
    end
-
  end
issm/trunk-jpl/src/m/miscellaneous/prctile_issm.m
r18067 → r23095

  catch me
-
      if length(size(x)) > 2
          error('Number of dimensions %d not implemented.',length(size(x)));
  …

  % fill in high and low values
-
  y(p<xi(1),:)=repmat(x(1,:),nnz(p<xi(1)),1);
  y(p>xi(n),:)=repmat(x(n,:),nnz(p>xi(n)),1);
  …
      end
    end
-
  end
issm/trunk-jpl/src/m/qmu/dakota_in_write.m
r22176 → r23095

  % write response levels
-
  if strcmp(dmeth.type,'nond')
      for i=1:length(dmeth.responses)
issm/trunk-jpl/src/m/qmu/process_qmu_response_data.m
r13646 → r23095
  %ok, process the domains named in qmu_mass_flux_profiles, to build a list of segments (MatArray)
  md.qmu.mass_flux_segments=cell(num_mass_flux,1);
-
+ md.qmu.mass_flux_segments
  for i=1:num_mass_flux,
      md.qmu.mass_flux_segments{i}=MeshProfileIntersection(md.mesh.elements,md.mesh.x,md.mesh.y,[md.qmu.mass_flux_profile_directory '/' md.qmu.mass_flux_profiles{i}]);
  end
-
+ md.qmu.mass_flux_segments
  end
issm/trunk-jpl/src/m/qmu/vector_write.m
r13646 → r23095
-
- % %function to write a vector on multiple lines
-
+ %
+ % function to write a vector on multiple lines
+ %
  function []=vector_write(fidi,sbeg,vec,nmax,cmax)
  …
  % assemble each line, flushing when necessary
-
  for i=1:numel(vec)
      if isnumeric(vec(i))
issm/trunk-jpl/src/m/solve/WriteData.py
r22273 → r23095
      fid.write(struct.pack('i',len(data)))

-     #write each matrix:
      for matrix in data:
          if isinstance(matrix,(bool,int,long,float)):
  …
          s=matrix.shape
-         if np.ndim(data) == 1:
+
+         if np.ndim(matrix) == 1:
              fid.write(struct.pack('i',s[0]))
              fid.write(struct.pack('i',1))
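The np.ndim fix matters when a MatArray mixes 1-D vectors and 2-D matrices: the dimension header of each entry must come from that entry, not from the outer list. A hedged sketch of the per-entry header logic (matarray_headers is an illustrative stand-in, not the WriteData API):

    import struct
    import numpy as np

    def matarray_headers(data):
        headers = [struct.pack('i', len(data))]            # number of matrices
        for matrix in data:
            matrix = np.asarray(matrix, dtype=float)
            s = matrix.shape
            if np.ndim(matrix) == 1:
                headers.append(struct.pack('2i', s[0], 1))     # vector -> n x 1
            else:
                headers.append(struct.pack('2i', s[0], s[1]))  # matrix -> n x m
        return headers

    matarray_headers([np.zeros(3), np.zeros((2, 4))])      # headers for a 3x1 and a 2x4 entry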
issm/trunk-jpl/src/m/solve/loadresultsfromcluster.py
r20820 → r23095
  import platform
  from loadresultsfromdisk import loadresultsfromdisk
+
+ from helpers import *

  def loadresultsfromcluster(md,runtimename=False):
  …
          filelist.append(md.miscellaneous.name+'.qmu.err')
          filelist.append(md.miscellaneous.name+'.qmu.out')
-         if 'tabular_graphics_data' in md.qmu.params:
-             if md.qmu.params['tabular_graphics_data']:
+         if 'tabular_graphics_data' in fieldnames(md.qmu.params):
+             if md.qmu.params.tabular_graphics_data:
                  filelist.append('dakota_tabular.dat')
      else:
  …

      #If we are here, no errors in the solution sequence, call loadresultsfromdisk.
-     if os.path.getsize(md.miscellaneous.name+'.outbin')>0:
-         md=loadresultsfromdisk(md,md.miscellaneous.name+'.outbin')
-     else:
-         print 'WARNING, outbin file is empty for run '+md.miscellaneous.name
+     if os.path.exists(md.miscellaneous.name+'.outbin'):
+         if os.path.getsize(md.miscellaneous.name+'.outbin')>0:
+             md=loadresultsfromdisk(md,md.miscellaneous.name+'.outbin')
+         else:
+             print 'WARNING, outbin file is empty for run '+md.miscellaneous.name
+     elif not md.qmu.isdakota:
+         print 'WARNING, outbin file does not exist '+md.miscellaneous.name

      #erase the log and output files
      if md.qmu.isdakota:
-         filename=os.path.join('qmu'+str(os.getpid()),md.miscellaneous.name)
+         #filename=os.path.join('qmu'+str(os.getpid()),md.miscellaneous.name)
+         filename = md.miscellaneous.name
+
+         # this will not work like normal as dakota doesn't generate outbin files,
+         # instead calls postqmu to store results directly in the model
+         # at md.results.dakota via dakota_out_parse
+         md=loadresultsfromdisk(md,md.miscellaneous.name+'.outbin')
      else:
          filename=md.miscellaneous.name
  …
      if hostname==cluster.name:
          if md.qmu.isdakota:
-             filename=os.path.join('qmu'+str(os.getpid()),md.miscellaneous.name)
+             #filename=os.path.join('qmu'+str(os.getpid()),md.miscellaneous.name)
+             filename = md.miscellaneous.name
              TryRem('.queue',filename)
          else:
  …
          TryRem('.bat',filename)

+     # remove this for bin file debugging
      TryRem('.bin',filename)
+
+     #cwd = os.getcwd().split('/')[-1]
+     if md.qmu.isdakota:
+         os.chdir('..')
+         #TryRem('',cwd)

      return md

  def TryRem(extension,filename):
-     try: 
+     try:
          os.remove(filename+extension)
      except OSError:
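The reworked load guards against a missing or empty .outbin: for a Dakota run the results are recovered through postqmu rather than from a solver-written outbin, so a missing file is only reported for non-Dakota runs. A hedged sketch of that guard (try_load and load_outbin are illustrative stand-ins, not ISSM functions):

    import os

    def try_load(name, isdakota, load_outbin):
        outbin = name + '.outbin'
        if os.path.exists(outbin):
            if os.path.getsize(outbin) > 0:
                return load_outbin(outbin)
            print('WARNING, outbin file is empty for run ' + name)
        elif not isdakota:
            print('WARNING, outbin file does not exist ' + name)
        return None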
issm/trunk-jpl/src/m/solve/loadresultsfromdisk.py
r21069 → r23095
  from parseresultsfromdisk import parseresultsfromdisk
  import MatlabFuncs as m
+ from postqmu import *

  def loadresultsfromdisk(md,filename):
  …
      #post processes qmu results if necessary
      else:
-         md=postqmu(md)
-         os.chdir('..')
+         md=postqmu(md,filename)

      return md
issm/trunk-jpl/src/m/solve/marshall.py
r22004 → r23095
      marshall(md)
      """
-
-     print "marshalling file '%s.bin'." % md.miscellaneous.name
+     if md.verbose.solution:
+         print "marshalling file '%s.bin'." % md.miscellaneous.name

      #open file for binary writing
issm/trunk-jpl/src/m/solve/solve.py
r22133 → r23095
  from waitonlock import waitonlock
  from loadresultsfromcluster import loadresultsfromcluster
+ from preqmu import *
+ #from MatlabFuncs import *

  def solve(md,solutionstring,*args):
  …
      #check model consistency
      if options.getfieldvalue('checkconsistency','yes')=='yes':
-         print "checking model consistency"
+         if md.verbose.solution:
+             print "checking model consistency"
          ismodelselfconsistent(md)
  …
          print 'The results must be loaded manually with md=loadresultsfromcluster(md).'
      else: #load results
-         print 'loading results from cluster'
+         if md.verbose.solution:
+             print 'loading results from cluster'
          md=loadresultsfromcluster(md)

      #post processes qmu results if necessary
      if md.qmu.isdakota:
-         if not strncmpi(options['keep'],'y',1):
+         if not strncmpi(options.getfieldvalue('keep','y'),'y',1):
              shutil.rmtree('qmu'+str(os.getpid()))
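The 'keep' change replaces direct indexing of the options (which raises when the caller never passed a keep option) with a lookup that falls back to a default. A hedged sketch of the resulting behavior, with a plain dict standing in for the pairoptions object and an illustrative helper name:

    def keep_qmu_directory(options):
        keep = options.get('keep', 'y')          # default to 'y' when unset
        return str(keep)[:1].lower() == 'y'      # strncmpi(..., 'y', 1) equivalent

    print(keep_qmu_directory({}))                # True  -> qmu* directory is kept
    print(keep_qmu_directory({'keep': 'no'}))    # False -> qmu* directory is removed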