Changeset 26857
- Timestamp: 02/07/22 04:44:59 (3 years ago)
- Location: issm/trunk-jpl
- Files: 6 edited
issm/trunk-jpl/src/m/contrib/defleurian/netCDF/export_netCDF.m
r26761 → r26857

  function export_netCDF(md,filename)
  %verbosity of the code, 0 is no messages, 5 is chatty
- verbose = 5;
+ verbose = 0;
  if exist(filename),
      delete(filename)
…
  elseif isa(Var,'struct') % structures need special treatment

+     if strcmp(groups{i}, 'results'),
      klasstring='results.results';
      netcdf.putAtt(groupID,netcdf.getConstant('NC_GLOBAL'),'classtype',klasstring);
+     Listsize= length(md.(groups{i}).(fields{j}));
      subgroupname=fields{j};
      subgroupID=netcdf.defGrp(groupID,subgroupname);
-     klasstring ='results.solutionstep';
+     klasstring='results.solutionstep';
      netcdf.putAtt(subgroupID,netcdf.getConstant('NC_GLOBAL'),'classtype',klasstring);
      subfields=fieldnames(md.(groups{i}).(fields{j}));
…
      end
      for k=1:length(subfields),
-         if verbose > 4,
-             disp(sprintf("=@@=creating var for %s.%s.%s",groups{i}, fields{j}, subfields{k}));
-         end
-         Var = md.(groups{i}).(fields{j}).(subfields{k});
-         [DimSize,DimValue,varid]=CreateVar(ncid,Var,subgroupID,subfields{k},DimSize,DimValue);
-         if ~isempty(varid),
-             FillVar(Var,subgroupID,varid);
+         if ~ismember(subfields{k}, {'errlog', 'outlog'})
+             StackedVar=restable();
+             for l=1:Listsize
+                 Var = md.(groups{i}).(fields{j})(l).(subfields{k});
+                 lastindex=l;
+                 StackedVar=StackedVar.update(Var);
+             end
+             if verbose > 4,
+                 disp(sprintf("=@@=creating var for %s.%s.%s",groups{i}, fields{j}, subfields{k}));
+             end
+             StackedVar=StackedVar.finalize(lastindex);
+             %StackedVar=StackedVar'; %transposing to get time as first dimension
+             [DimSize,DimValue,varid]=CreateVar(ncid,StackedVar,subgroupID,subfields{k},DimSize,DimValue);
+             if ~isempty(varid),
+                 FillVar(StackedVar,subgroupID,varid);
+             end
+
          end
      end
…
      [dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue);
      varid = netcdf.defVar(groupID,field,'NC_CHAR',dims);
+     if numel(Var)>1
+         netcdf.defVarDeflate(groupID,varid,true,true,4);
+     end
  end
…
      [dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue);
      varid = netcdf.defVar(groupID,field,'NC_INT64',dims);
+     if numel(Var)>1
+         netcdf.defVarDeflate(groupID,varid,true,true,4);
+     end
  else
      [dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue);
      varid = netcdf.defVar(groupID,field,'NC_DOUBLE',dims);
+     if numel(Var)>1
+         netcdf.defVarDeflate(groupID,varid,true,true,4);
+     end
  end
  elseif isa(Var,'cell'),
-     [dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue);
-     varid = netcdf.defVar(groupID,field,'NC_CHAR',dims);
-
+     % cells can be a range of things, what are we dealing with here
+     if isempty(Var),
+         netcdf.putAtt(groupID,netcdf.getConstant('NC_GLOBAL'),field,'emptycell');
+         varid=[];
+     else
+         [dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue);
+         if isa(Var{1}, 'double'),
+             varid = netcdf.defVar(groupID,field,'NC_DOUBLE',dims);
+             if numel(Var)>1
+                 netcdf.defVarDeflate(groupID,varid,true,true,4);
+             end
+         else
+             varid = netcdf.defVar(groupID,field,'NC_CHAR',dims);
+             if numel(Var)>1
+                 netcdf.defVarDeflate(groupID,varid,true,true,4);
+             end
+         end
+     end
  elseif isa(Var,'struct'),
      if isempty(fieldnames(Var)),
…
      [dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue);
      varid = netcdf.defVar(groupID,field,'NC_CHAR',dims);
+     if numel(Var)>1
+         netcdf.defVarDeflate(groupID,varid,true,true,4);
+     end
  end
  else
…
  elseif isa(Var,'char'), % at this point this should be a character array
      netcdf.putVar(groupID,varid,Var);
- elseif isa(Var,'cell'),
-     if ~isempty(Var),
-         if length(Var)==0,
-             netcdf.putVar(groupID,varid,0,9,'emptycell');
+ elseif isa(Var,'cell'), % there can be a number of things in a cell array
+     for i=1:length(Var),
+         if isa(Var{i},'char') %for characters we limit the size to 40 for now
+             if length(Var)>1,
+                 count=[min(length(Var{i}),40), 1];
+                 startpoint=[0 i-1];
+             else
+                 count=min(length(Var{i}),40);
+                 startpoint=0;
+             end
+
+             if length(Var{i})>40,
+                 netcdf.putVar(groupID,varid,startpoint,count,Var{i}(1:40));
+                 disp(sprintf('some variable have been truncated'));
+             else
+                 netcdf.putVar(groupID,varid,startpoint,count,Var{i});
+             end
+         elseif isa(Var{i},'double')
+             startpoint=[i-1];
+             count=[1 length(Var{i}) ndims(Var{i})]
+             for j=1:ndims(Var{i}),
+                 startpoint=[startpoint 0]
+             end
+             netcdf.putVar(groupID,varid,startpoint,count,Var{i});
          else
-             for i=1:length(Var),
-                 if length(Var)>1,
-                     count=[min(length(Var{i}),40), 1];
-                     startpoint=[0 i-1];
-                 else
-                     count=min(length(Var{i}),40);
-                     startpoint=0;
-                 end
-
-                 if length(Var{i})>40,
-                     netcdf.putVar(groupID,varid,startpoint,count,Var{i}(1:40));
-                     disp(sprintf('some variable have been truncated'));
-                 else
-                     netcdf.putVar(groupID,varid,startpoint,count,Var{i});
-                 end
-             end
+             disp(sprintf("WARNING: cell of class %s is not supported.",class(Var{i})))
          end
      end
…
  function [dims,DimSize,DimValue]=GetDims(ncid,Var,DimSize,DimValue)
  dims=[];
- % if isa(Var,'cell'),
- %     varsize=size(Var');
- % else
- if isa(Var,'struct')
+ celldims=[];
+ dim=ndims(Var);
+ if isa(Var,'struct'),
      varsize=length(fieldnames(Var));
  else
      varsize=size(Var);
- end
-
- % dim=sum(varsize>1);
- dim=ndims(Var);
+     if isa(Var, 'cell')
+         %we add the dimension of the cells themselves,
+         %that will most probably fail if cells have different sizes
+         for i=1:dim,
+             newdim=size(Var{i});
+             if ~ismember(newdim, celldims),
+                 celldims=[celldims newdim];
+             end
+         end
+     end
+ end
+ varsize=[varsize celldims];
+ alldim=length(varsize);
  if dim>0,
-     for i=1:dim,
-         if size(Var, i)>1
+     for i=1:alldim,
+         if size(Var, i)>1 || i>dim, %we skip dimensions with zero lenght but want to add dimensions from cells
              indsize=find(varsize(i)==DimValue);
              if length(indsize)>0
…
      end
  end
- %if we have an empty cell variable we need to add a stringlength for the no data
- % if isa(Var,'cell'),
- %     if isempty(dims)
- %         dims=[DimSize(4).index];
- %     else
- %         dims=[DimSize(4).index dims];
- %     end
- % end
+ if isa(Var, 'cell') && isa(Var{1}, 'char'),
+     %if we have an cell variable with strings we need to add a stringlength
+     dims=[dims DimSize(4).index];
+ end
  % struct also need an extra dimension 2, but only if non empty
  if isa(Var,'struct'),
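The main additions in this file are the stacking of per-step results through restable() and per-variable compression through netcdf.defVarDeflate. For readers more familiar with the Python netCDF4 package, a rough equivalent of the deflate setting (shuffle filter on, deflate level 4) is sketched below; the file and variable names are purely illustrative and are not part of this changeset.

    from netCDF4 import Dataset
    import numpy as np

    # Illustrative sketch only: level-4 deflate with the shuffle filter, the
    # Python counterpart of netcdf.defVarDeflate(groupID, varid, true, true, 4).
    ds = Dataset('example.nc', 'w')
    ds.createDimension('Time', None)
    vel = ds.createVariable('Vel', 'f8', ('Time',), shuffle=True, zlib=True, complevel=4)
    vel[:] = np.arange(5, dtype=float)
    ds.close()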
issm/trunk-jpl/src/m/contrib/defleurian/netCDF/export_netCDF.py
r26761 → r26857

      def update(self, stepvar):
-         #if we have a scalar we just add it to the en
+         #if we have a scalar we just add it to the end
          #we save the size of the current step for further treatment
          if len(np.shape(stepvar)) == 0:
…
          #we save the size of the current step for further treatment
          else:
-             self.sizes.append(len(stepvar))
-             for r in stepvar:
-                 self.data.append(r)
+             self.sizes.append([np.shape(stepvar)])
+             stackdat = np.squeeze(stepvar.flatten())
+             self.data = np.hstack((self.data, stackdat))

      def finalize(self, rows):
          #we have more scalars than steps, so we have an array
          if len(self.data) > rows:
+             datasize = np.squeeze(self.sizes)
+             maxsize = []
+
+             try:
+                 dims = np.arange(np.shape(datasize)[1])
+                 for dim in dims:
+                     maxsize.append(np.nanmax(datasize[:, dim]))
+             except IndexError:
+                 maxsize.append(np.nanmax(datasize[:]))
+
+             findim = np.insert(maxsize, 0, rows)
+
              #first check if all steps are the same size
-             SameSize = np.sum(np.asarray(self.sizes) - self.sizes[0]) == 0
+             SameSize = np.sum(np.abs(datasize - datasize[0])) == 0
              if SameSize:
                  #same size for all steps, just reshape
-                 return np.reshape(self.data, newshape=(rows, int(len(self.data) / rows)))
+                 return np.reshape(self.data, newshape=(findim))
              else:
                  #different sizes at each steps, first create a table big enough for the biggest step
                  startpoint = 0
-                 datadim = len(np.shape(self.data))
-                 if datadim == 1:
-                     outdat = np.nan * np.ones((rows, np.nanmax(self.sizes)))
-                     for step in range(rows):
-                         curlen = self.sizes[step]
-                         outdat[step, :curlen] = self.data[startpoint: startpoint + curlen]
-                         startpoint += curlen
-                 elif datadim == 2:
-                     outdat = np.nan * np.ones((rows, np.nanmax(self.sizes), np.shape(self.data)[1]))
-                     for step in range(rows):
-                         curlen = self.sizes[step]
-                         outdat[step, :curlen, :] = self.data[startpoint: startpoint + curlen]
-                         startpoint += curlen
-
-                 else:
-                     print("ERROR, reult treatment cant cope with dimensions above 2")
+                 outdat = np.nan * np.ones(findim)
+                 for step in range(rows):
+                     slicer = [slice(0, d) for d in datasize[step, :]]
+                     slicer = np.insert(slicer, 0, step)
+                     curlen = int(np.prod(datasize[step, :]))
+                     outdat[tuple(slicer)] = np.reshape(self.data[startpoint:startpoint + curlen], newshape=(datasize[step, :]))
+                     startpoint += curlen
                  return outdat
          #as much scalars as stpes (or less) so just one value per step
…
  def export_netCDF(md, filename):  # {{{
      #verbosity of the code, 0 is no messages, 5 is chatty
-     verbose = 5
+     verbose = 0
      if path.exists(filename):
          print('File {} allready exist'.format(filename))
…
      val_type = type(var)

+     print(val_type)
      # grab dimension
      if val_type in [collections.OrderedDict, dict]:
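The reworked finalize() collects per-step results of possibly different sizes into one NaN-padded array whose leading dimension is time. A minimal numpy sketch of that padding idea (not the ResTable code itself, using made-up data) looks like this:

    import numpy as np

    # Two "result steps" of different lengths, stacked into a (steps x maxlen)
    # array padded with NaN, which is the shape finalize() aims for.
    steps = [np.array([1.0, 2.0, 3.0]), np.array([4.0, 5.0])]
    out = np.full((len(steps), max(len(s) for s in steps)), np.nan)
    for i, s in enumerate(steps):
        out[i, :len(s)] = s
    # out -> [[1., 2., 3.], [4., 5., nan]]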
issm/trunk-jpl/src/m/io/loadmodel.py
r25455 → r26857

- def loadmodel(path, onlylast=False):
+ def loadmodel(path, singletime=None):
      """LOADMODEL - load a model
…
      # try:
      #recover model on file and name it md
-     struc = loadvars(path, onlylast=onlylast)
+     struc = loadvars(path, singletime=singletime)
      name = [key for key in list(struc.keys())]
      if len(name) > 1:
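With the onlylast flag replaced by singletime, a single time step can now be requested by index instead of always taking the last one. A hypothetical call (the file name is only an example) would be:

    from loadmodel import loadmodel

    md_all = loadmodel('test101ma.nc')                 # full results series
    md_one = loadmodel('test101ma.nc', singletime=-1)  # only the last time step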
issm/trunk-jpl/src/m/io/loadvars.py
r26761 → r26857

  from netCDF4 import Dataset, chartostring
  import numpy as np
+ from importlib import import_module
  from model import *
…
      filename = ''
      nvdict = {}
-     debug = False # print messages if true
+     debug = False  # print messages if true
…
          raise TypeError("Unrecognized input arguments.")

-     onlylast = False
+     timeindex = False

      for key, value in kwargs.items():
-         if key == 'onlylast':
-             onlylast = value
+         if key == 'singletime':
+             timeindex = value

      if whichdb(filename):  #We used python pickle for the save
…
              else:
                  #Time dimension is in all the variables so we take that as stepnumber for the results
-                 if onlylast:  #we load only the last result to save on time and memory
+                 if timeindex:  #we load only the last result to save on time and memory
                      nvdict['md'].__dict__[classtree[mod][0]].__dict__[classtree[mod][1]] = [getattr(classtype[mod][1], listtype)()]
                      Tree = nvdict['md'].__dict__[classtree[mod][0]].__dict__[classtree[mod][1]]
…
                  Tree = nvdict['md'].__dict__[classtree[mod][0]].__dict__[classtree[mod][1]]
              else:
-                 if onlylast:  #we load only the last result to save on time and memory
+                 if timeindex:  #we load only the last result to save on time and memory
                      nvdict['md'].__dict__[classtree[mod][0]].__dict__[classtree[mod][1]] = [getattr(classtype[mod][1], listtype)()]
                      Tree = nvdict['md'].__dict__[classtree[mod][0]].__dict__[classtree[mod][1]]
…
              print("WARNING: md.{}.{} is not initialized, hopefully that was done in the main group:".format(classtree[mod][0], classtree[mod][1]))
              Tree = nvdict['md'].__dict__[classtree[mod][0]].__dict__[classtree[mod][1]]
+         elif classtype[mod][0] == 'SMBgemb.SMBgemb':
+             curclass = NCFile.groups[classtree[mod][0]]
+             modulename = split(r'\.', classtype[mod][0])[0]
+             nvdict['md'].__dict__[mod] = getattr(classtype[mod][1], modulename)(nvdict['md'].__dict__['mesh'], nvdict['md'].__dict__['geometry'])
+             Tree = nvdict['md'].__dict__[classtree[mod][0]]
          else:
              curclass = NCFile.groups[classtree[mod][0]]
…
          #for i in range(0, max(1, len(curclass.groups))):
          if len(curclass.groups) > 0:  #that is presumably only for old style NC where each result step had its own group
-             if onlylast:
-                 groupclass = [curclass.groups[keylist[len(curclass.groups) - 1]]]
+             if timeindex:
+                 if timeindex < 0:
+                     groupclass = [curclass.groups[keylist[len(curclass.groups) - timeindex]]]
+                 else:
+                     groupclass = [curclass.groups[keylist[timeindex]]]
              else:
                  groupclass = [curclass.groups[key] for key in keylist]
…
          NewFormat = 'Time' in NCFile.dimensions
          if type(Tree) == list:  # and NewFormat:
-             if onlylast:
+             if timeindex:
                  if NewFormat:
                      if vardim == 1:
-                         Tree[0].__dict__[str(var)] = varval[-1].data
+                         try:
+                             Tree[0].__dict__[str(var)] = varval[timeindex].data
+                         except IndexError:
+                             print('WARNING: No data on index {} for {} reverting to last time.'.format(timeindex, str(var)))
+                             Tree[0].__dict__[str(var)] = varval[-1].data
+
                      elif vardim == 2:
-                         Tree[0].__dict__[str(var)] = varval[-1, :].data
+                         Tree[0].__dict__[str(var)] = varval[timeindex, :].data
                      elif vardim == 3:
-                         Tree[0].__dict__[str(var)] = varval[-1, :, :].data
+                         Tree[0].__dict__[str(var)] = varval[timeindex, :, :].data
                      else:
                          print('table dimension greater than 3 not implemented yet')
…
          else:
              if NewFormat:
-                 incomplete = 'Time' not in varval.dimensions and NewFormat
+                 print(varval.dimensions)
+                 incomplete = 'Time' not in varval.dimensions
                  if incomplete:
                      chosendim = varval.dimensions[0]
…
          else:
              if vardim == 0:  #that is a scalar
-                 if str(varval[0]) == '' or str(varval[0]) == '--':  #no value
+                 if str(varval[0]) in['', '--', 'emptycell']:  #no value
                      Tree.__dict__[str(var)] = []
                  elif varval[0] == 'True':  #treatin bool
…
              print(" ==> treating attribute {}".format(attr))
          if attr != 'classtype':  #classtype is for treatment, don't get it back
-             # attribute = str(attr).swapcase()  #there is a reason for swapcase, no sure what it is anymore
-             # if attr == 'VARNAME':
-             #     attribute = 'name'
              if attr == 'varname':
                  attribute = 'name'
…
                  Tree[0].__dict__[attribute] = str(listclass.getncattr(attr))
              else:
-                 Tree.__dict__[attribute] = str(listclass.getncattr(attr))
                  if listclass.getncattr(attr) == 'True':
                      Tree.__dict__[attribute] = True
                  elif listclass.getncattr(attr) == 'False':
                      Tree.__dict__[attribute] = False
+                 elif listclass.getncattr(attr) == 'emptycell':
+                     Tree.__dict__[attribute] = []
+                 else:
+                     Tree.__dict__[attribute] = str(listclass.getncattr(attr))
      # }}}
  # }}}
…
          try:
              modulename = split(r'\.', class_dict[classe][0])[0]
-             class_dict[classe].append(__import__(modulename))
+             class_dict[classe].append(import_module(modulename))
          except ModuleNotFoundError:
              #submodule probably has a different name
              modulename = str(getattr(NCData.groups[group].groups[subgroup], 'classtype'))
              print("WARNING importing {} rather than {}".format(modulename, class_dict[classe][0]))
-             class_dict[classe].append(__import__(modulename))
+             class_dict[classe].append(import_module(modulename))
          class_tree[classe] = [group, subgroup]
      else:
…
              print("WARNING: module {} does not exist anymore and is skipped".format(modulename))
          else:
-             class_dict[classe].append(__import__(modulename))
+             class_dict[classe].append(import_module(modulename))
          class_tree[classe] = [group, ]
  except AttributeError:
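Besides the singletime handling, the loader now uses importlib.import_module instead of the __import__ builtin when resolving class modules. The difference matters for dotted module names, as this standard-library-only sketch shows:

    from importlib import import_module

    sub = import_module('os.path')  # returns the os.path submodule itself
    top = __import__('os.path')     # returns the top-level os package
    print(sub.__name__, top.__name__)  # 'posixpath' (or 'ntpath') vs 'os'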
issm/trunk-jpl/test/NightlyRun/runme.m
r26409 → r26857

  %RUNME - test deck for ISSM nightly runs
  %
- % In a test deck directory (for example, test/NightlyRun) the following 
+ % In a test deck directory (for example, test/NightlyRun) the following
  % command will launch all existing tests,
  %
…
  %      'update': update the archive
  %      'valgrind': check for memory leaks (default value of md.debug.valgrind needs to be changed manually)
+ %      'ncExport': export netCDF file
  %      'stoponerror' 1 or 0
  %
…
  %GET procedure {{{
  procedure=getfieldvalue(options,'procedure','check');
- if ~ismember(procedure,{'check','update','valgrind'})
+ if ~ismember(procedure,{'check','update','valgrind','ncExport'})
      disp('runme warning: procedure not supported, defaulting to test ''check''')
      procedure='check';
…
      end

+ %CHECK for memory leaks?
+ elseif strcmpi(procedure,'ncExport'),
+     export_netCDF(md, ['test' num2str(id) 'ma.nc'])
+
  %ELSE: CHECK TEST
  else,
issm/trunk-jpl/test/NightlyRun/runme.py
r26553 → r26857

  from glob import glob
  import os
- import re
+ from re import search, split
  from sys import float_info
  from traceback import format_exc
…
  from IdToName import IdToName
  from parallelrange import parallelrange
+ from loadmodel import loadmodel
+ from solve import solve
+ from importlib import import_module
…
  """RUNME - test deck for ISSM nightly runs

- In a test deck directory (for example, test/NightlyRun) the following 
+ In a test deck directory (for example, test/NightlyRun) the following
  command will launch all existing tests,
…
  Options:
- -i/--id         Followed by the list of ids or (parts of) test names 
+ -i/--id         Followed by the list of ids or (parts of) test names
                  requested
- -e/--exclude    Ids or (parts of) test names to be excluded (same 
-                 format as id). Does nothing if 'id' is specified with 
+ -e/--exclude    Ids or (parts of) test names to be excluded (same
+                 format as id). Does nothing if 'id' is specified with
                  different values.
  -b/--benchmark  'all' : (all of the tests)
…
  -p/--procedure  'check' : run the test (default)
                  'update' : update the archive
+                 'runFromNC' : run from an existing nc file
+

  Usage:
…
  TODO:
- - At '#disp test result', make sure precision of output matches that of 
+ - At '#disp test result', make sure precision of output matches that of
    MATLAB.
- - Check for failures that do not raise exceptions (for example, 'Standard 
-   exception'; see also jenkins/jenkins.sh). These should be counted as 
+ - Check for failures that do not raise exceptions (for example, 'Standard
+   exception'; see also jenkins/jenkins.sh). These should be counted as
    failures.
  """
…
  # }}}
  #GET procedure {{{
- if procedure not in ['check', 'update']:
+ if procedure not in ['check', 'update', 'runFromNC']:
      print(("runme warning: procedure '{}' not supported, defaulting to test 'check'.".format(procedure)))
      procedure = 'check'
…
  #GET ids {{{
  flist = glob('test*.py')  #File name must start with 'test' and must end by '.py' and must be different than 'test.py'
- list_ids = [int(re.search(r'\d+', file.split('.')[0]).group()) for file in flist if not file == 'test.py']  # Keep test id only (skip 'test' and '.py')
+ list_ids = [int(search(r'\d+', file.split('.')[0]).group()) for file in flist if not file == 'test.py']  # Keep test id only (skip 'test' and '.py')

  i1, i2 = parallelrange(rank, numprocs, len(list_ids))  #Get tests for this cpu only
…
      os.chdir(root)
      id_string = IdToName(id)
-     exec(compile(open('test{}.py'.format(id)).read(), 'test{}.py'.format(id), 'exec'), globals())
+     print(("----------------runing-----------------------"))
+     if procedure == 'runFromNC':
+         Tmod = import_module('test{}'.format(id))
+     else:
+         exec(compile(open('test{}.py'.format(id)).read(), 'test{}.py'.format(id), 'exec'), globals())

      #UPDATE ARCHIVE?
…
              archwrite(archive_file, archive_name + '_field' + str(k + 1), field)
          print(("File {} saved.\n".format(os.path.join('..', 'Archives', archive_name + '.arch'))))
+     elif procedure == 'runFromNC':
+         print(("----------------loadingNC-----------------------"))
+         mdl = loadmodel('test{}ma.nc'.format(id))
+         for key in mdl.results.__dict__.keys():
+             if 'Solution' in key:
+                 solvetype = split('Solution', key)[0]
+         mdl.results = []
+         mdl = solve(mdl, solvetype)
+         for k, fieldname in enumerate(Tmod.field_names):
+             try:
+                 if search(r'\d+$', fieldname):
+                     index = int(search(r'\d+$', fieldname).group()) - 1
+                     fieldname = fieldname[:search(r'\d+$', fieldname).start()]
+                 else:
+                     index = 0
+                 #Get field from nc run
+                 try:
+                     field = mdl.results.__dict__[solvetype + 'Solution'][index].__dict__[fieldname]
+                 except KeyError:
+                     print("WARNING: {} does not exist and checking will be skipped".format(fieldname))
+                     continue
+                 #Get reference from std run
+                 ref = Tmod.field_values[k]
+                 #Get tolerance
+                 tolerance = Tmod.field_tolerances[k]
+                 error_diff = np.amax(np.abs(ref - field), axis=0) / (np.amax(np.abs(ref), axis=0) + float_info.epsilon)
+                 if not np.isscalar(error_diff):
+                     error_diff = error_diff[0]
+
+                 #disp test result
+                 if (np.any(error_diff > tolerance) or np.isnan(error_diff)):
+                     print(('ERROR difference: {:7.2g} > {:7.2g} test id: {} test name: {} field: {}'.format(error_diff, tolerance, id, id_string, fieldname)))
+                     errorcount += 1
+                     erroredtest_list.append(id)
+                 else:
+                     print(('SUCCESS difference: {:7.2g} < {:7.2g} test id: {} test name: {} field: {}'.format(error_diff, tolerance, id, id_string, fieldname)))
+
+             except Exception as message:
+                 #something went wrong, print failure message:
+                 print((format_exc()))
+                 if output == 'nightly':
+                     fid = open(os.path.join(ISSM_DIR, 'nightlylog', 'pythonerror.log'), 'a')
+                     fid.write('%s' % message)
+                     fid.write('\n------------------------------------------------------------------\n')
+                     fid.close()
+                     print(('FAILURE difference: N/A test id: {} test name: {} field: {}'.format(id, id_string, fieldname)))
+                 else:
+                     print(('FAILURE difference: N/A test id: {} test name: {} field: {}'.format(id, id_string, fieldname)))
+                     raise RuntimeError(message)
  #ELSE: CHECK TEST
  else:
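Taken together, the MATLAB 'ncExport' procedure and the Python 'runFromNC' procedure form a round trip: the MATLAB side writes testNNNma.nc, and the Python side reloads it, re-runs the solution and compares the fields against the test's reference values. A hypothetical invocation (argument names follow the help text above and are an assumption, not verified against the full signature) would be:

    from runme import runme

    # Assumes test101ma.nc was produced beforehand, e.g. by the MATLAB side with
    # runme('procedure','ncExport','id',101).
    runme(id=[101], procedure='runFromNC')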