Changeset 27610

Timestamp: 02/23/23 20:22:10 (2 years ago)
File: 1 edited
issm/trunk-jpl/test/NightlyRun/runme.py
--- issm/trunk-jpl/test/NightlyRun/runme.py (r27443)
+++ issm/trunk-jpl/test/NightlyRun/runme.py (r27610)

-#!/usr/bin/env python
-#
-# NOTE: Switch to,
-#
-# #!/usr/bin/python3
-#
-# when making Python 3 default
-#
+#!/usr/bin/env python3
+
+
 import argparse
 import os
…

 def runme(id=None, exclude=None, benchmark='nightly', procedure='check', output='none', rank=1, numprocs=1):
-    """RUNME - test deck for ISSM nightly runs
+    """runme - test deck for ISSM nightly runs

     In a test deck directory (for example, test/NightlyRun) the following
…
     """

-    # Get ISSM_DIR variable
+    # Get ISSM_DIR variable
     ISSM_DIR = os.environ['ISSM_DIR']

-    # Process options
-    # GET benchmark {{{
+    # Process options
+    # Get benchmark {{{
     if benchmark not in ['all', 'nightly', 'validation', 'adolc', 'eismint', 'ismip', 'mesh', 'slc', 'thermal', 'tranforcing', 'qmu']:
         print(("runme warning: benchmark '{}' not supported, defaulting to test 'nightly'.".format(benchmark)))
         benchmark = 'nightly'
     # }}}
-    # GET procedure {{{
+    # Get procedure {{{
     if procedure not in ['check', 'update', 'runFromNC']:
         print(("runme warning: procedure '{}' not supported, defaulting to test 'check'.".format(procedure)))
         procedure = 'check'
     # }}}
-    # GET output {{{
+    # Get output {{{
     if output not in ['nightly', 'none']:
         print(("runme warning: output '{}' not supported, defaulting to test 'none'.".format(output)))
         output = 'none'
     # }}}
-    # GET RANK and NUMPROCS for multithreaded runs {{{
+    # Get rank and numprocs for multi-threaded runs {{{
     if (numprocs < rank):
         numprocs = 1
     # }}}
-    # GET ids {{{
+    # Get ids {{{
     flist = [f for f in os.listdir('.') if re.match('test[0-9]+.py', f)]  # File name must follow the format "test<integer>.py"
     list_ids = [int(re.search(r'\d+',f.split('.')[0]).group()) for f in flist]  # Retrieve test IDs
     i1, i2 = parallelrange(rank, numprocs, len(list_ids))  # Get tests for this CPU only
     list_ids = list_ids[i1:i2 + 1]
-    if np.size(id) > 0 and id is not None:
+    if len(id) > 0 and id is not None:
         test_ids = set(GetIds(id)).intersection(set(list_ids))
         benchmark = None
     else:
-        # if no tests are specifically provided, do them all
+        # If no tests are specifically provided, do them all
         test_ids = set(list_ids)

     # }}}
-    # GET exclude {{{
+    # Get excluded tests {{{
     exclude_ids = GetIds(exclude)
     test_ids = test_ids.difference(exclude_ids)
     # }}}
     if procedure == 'runFromNC':
-        # That is a bamg test
+        # bamg test
         test_ids = test_ids.difference([119, 514])
-        # that is smbGEMB format is weird for the test
+        # smbGEMB format is weird for the test
         test_ids = test_ids.difference([243, 244, 252, 253])
-        # those are amr runs where the index is missing from fieldnames
+        # AMR runs where the index is missing from fieldnames
         test_ids = test_ids.difference([462, 463, 464, 465])
-        # test247 solves for thermal and transient which makes it complex to check
+        # test247 solves for thermal and transient which makes it complex to check
         test_ids = test_ids.difference([247])
-        # test 902 is running two models with different stepping
+        # test 902 is running two models with different stepping
         test_ids = test_ids.difference([902])
-        # I have a size issue in 517 needs investigation
+        # size issue in 517 needs investigation
         test_ids = test_ids.difference([517])

-    # Process Ids according to benchmarks {{{
+    # Process IDs according to benchmarks {{{
     if benchmark == 'nightly':
         test_ids = test_ids.intersection(set(range(1, 1000)))
…
     # }}}

-    # Loop over tests and launch sequence
+    # Loop over tests and launch sequence
     root = os.getcwd()
     errorcount = 0
…
         print(("----------------starting:{}-----------------------".format(id)))
         try:
-            # Execute test
+            # Execute test
             os.chdir(root)
             id_string = IdToName(id)
…
             exec(compile(open('test{}.py'.format(id)).read(), 'test{}.py'.format(id), 'exec'), globals())

-            # UPDATE ARCHIVE?
+            # Update archive?
             archive_name = 'Archive' + str(id)
             if procedure == 'update':
…
                         solvetype = re.split('Solution', key)[0]

-                # we save the results, scrap them and solve.
+                # Save the results, scrap them and solve
                 loaded_res = mdl.results
                 mdl.results = []
                 mdl = solve(mdl, solvetype)

-                # we loop on the field_names from the nghtly test
+                # Loop on the field_names from the nightly test
                 for k, fieldname in enumerate(Tmod.field_names):
                     try:
-                        # first look for indexing
+                        # First, look for indexing
                         if re.search(r'\d+$', fieldname):
                             index = int(re.search(r'\d+$', fieldname).group()) - 1
…
                             index = 0

-                        # Then check if the key exists in the loaded results
+                        # Then, check if the key exists in the loaded results
                         try:
                             reskeys = mdl.results.__dict__[solvetype + 'Solution'][index].__dict__.keys()
                         except TypeError:
-                            # most probably a steady state so no subscripting
+                            # Most likely a steady state so no subscripting
                             reskeys = mdl.results.__dict__[solvetype + 'Solution'].__dict__.keys()
                         if fieldname not in reskeys:
…

                         if fieldname in namedifs.keys():
-                            # Some fields are not consistent
+                            # Some fields are not consistent
                             fieldname = namedifs[fieldname]
                         elif any([suf in fieldname for suf in sufixes]):
-                            # some test have loops that mess up with naming
+                            # Some tests have loops that mess up naming
                             try:
                                 sufix = sufixes[np.squeeze(np.where([suf in fieldname for suf in sufixes]))]
                             except TypeError:
-                                # probably several matches, we take the last one which should be the good one (Needs to be controled in the list above)
+                                # Probably several matches; we take the last one which should be the one we want to run (needs to be controlled in the list above)
                                 sufix = sufixes[np.squeeze(np.where([suf in fieldname for suf in sufixes]))[-1]]
                             fieldname = fieldname[:re.search(sufix, fieldname).start()]
                         elif fieldname.endswith("P") and index == 1:
-                            # we are looking for P2 but 2 as been considered as an index and so shifted by -1
+                            # Looking for P2 but 2 refers to an index, so shift by -1
                             fieldname = fieldname[:-1]
                         else:
-                            # could be that the index selected above is part of the name
+                            # Handle case where index selected above is part of the name
                             fieldname = fieldname + str(index + 1)
                         try:
…
                             loaded_field = loaded_res.__dict__[solvetype + 'Solution'][index].__dict__[fieldname]
                         except TypeError:
-                            # most probably a steady state so no subscripting
+                            # Most likely a steady state so no subscripting
                             try:
                                 field = mdl.results.__dict__[solvetype + 'Solution'].__dict__[fieldname]
…

                         ref = Tmod.field_values[k]
-                        # Get tolerance
+                        # Get tolerance
                         tolerance = Tmod.field_tolerances[k]
-                        # compute differences for the results computed from the nc file
+                        # Compute differences for the results computed from the nc file
                         error_diff = np.amax(np.abs(ref - field), axis=0) / (np.amax(np.abs(ref), axis=0) + float_info.epsilon)
                         if not np.isscalar(error_diff):
                             error_diff = error_diff[0]

-                        # compute the differences for the results of the nc file
+                        # Compute the differences for the results of the nc file
                         load_diff = np.amax(np.abs(np.squeeze(ref) - loaded_field), axis=0) / (np.amax(np.abs(np.squeeze(ref)), axis=0) + float_info.epsilon)
                         if not np.isscalar(load_diff):
                             load_diff = load_diff[0]

-                        # disp test result
+                        # Display test result
                         if (np.any(error_diff > tolerance) or np.isnan(error_diff)) and (np.any(load_diff > tolerance) or np.isnan(load_diff)):
                             if abs(error_diff - load_diff) < tolerance:
…
                         else:
                             print(('SUCCESS difference: {:7.2g} < {:7.2g} test id: {} test name: {} field: {}{}'.format(error_diff, tolerance, id, id_string, fieldname, index + 1)))
-                        # disp only if errors for the results
+                        # Display only if there are errors in the results

                     except Exception as message:
-                        # something went wrong, print failure message:
+                        # Something went wrong; print failure message
                         print((format_exc()))
                         if output == 'nightly':
…
                             print(('FAILURE difference: N/A test id: {} test name: {} field: {}'.format(id, id_string, fieldname)))
                         raise RuntimeError(message)
-            # ELSE: CHECK TEST
+            # Check test
             else:
-                # load archive
+                # Load archive
                 if os.path.exists(os.path.join('..', 'Archives', archive_name + '.arch')):
                     archive_file = os.path.join('..', 'Archives', archive_name + '.arch')
…
                 for k, fieldname in enumerate(field_names):
                     try:
-                        # Get field and tolerance
+                        # Get field and tolerance
                         field = np.array(field_values[k])
                         if len(field.shape) == 1:
…
                         tolerance = field_tolerances[k]

-                        # compare to archive
+                        # Compare to archive
                         # Matlab uses base 1, so use base 1 in labels
                         archive = np.array(archread(archive_file, archive_name + '_field' + str(k + 1)))
…
                             error_diff = error_diff[0]

-                        # disp test result
+                        # Display test result
                         if (np.any(error_diff > tolerance) or np.isnan(error_diff)):
                             print(('ERROR difference: {:7.2g} > {:7.2g} test id: {} test name: {} field: {}'.format(error_diff, tolerance, id, id_string, fieldname)))
…

                     except Exception as message:
-                        # something went wrong, print failure message:
+                        # Something went wrong; print failure message
                         print((format_exc()))
                         if output == 'nightly':
…

         except Exception as message:
-            # something went wrong, print failure message:
+            # Something went wrong; print failure message
             print((format_exc()))
             if output == 'nightly':
…
             print(("PYTHONSTARTUP file '{}' does not exist.".format(PYTHONSTARTUP)))

-    parser = argparse.ArgumentParser(description='RUNME - test deck for ISSM nightly runs')
+    parser = argparse.ArgumentParser(description='runme - test deck for ISSM nightly runs')
     parser.add_argument('-i', '--id', nargs='*', type=str, help='followed by the list of ids requested', default=[])
     parser.add_argument('-in', '--include_name', nargs='*', type=str, help='followed by the list of test names requested', default=[])
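The changeset leaves the test-deck interface itself unchanged: runme() can be called directly, or runme.py can be driven through the argparse options shown at the end of the diff (-i/--id, -in/--include_name, ...). A minimal usage sketch, assuming the script is run from test/NightlyRun with ISSM_DIR set; the test IDs below are arbitrary examples, not a recommendation:

# Usage sketch (assumptions: run from the test/NightlyRun directory with
# ISSM_DIR set; test IDs 101 and 102 are arbitrary examples).
from runme import runme

# Roughly equivalent to `./runme.py -i 101 102` on the command line:
# run two specific tests with the default 'check' procedure.
runme(id=['101', '102'], exclude=[], benchmark='nightly', procedure='check', output='none', rank=1, numprocs=1)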
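Both the runFromNC branch and the archive check compare each field against a reference with the same relative-difference formula before printing ERROR/SUCCESS lines. A self-contained sketch of that check; the array values are made up for illustration, while the formula is the one used in runme.py:

# Sketch of the per-field tolerance check; 'ref' and 'field' hold
# illustrative values only.
import numpy as np
from sys import float_info

ref = np.array([[1.0, 2.0], [3.0, 4.0]])      # archived reference values (illustrative)
field = np.array([[1.001, 2.0], [3.0, 4.0]])  # freshly computed values (illustrative)
tolerance = 1e-5

# Column-wise maximum relative difference, guarded against division by zero
error_diff = np.amax(np.abs(ref - field), axis=0) / (np.amax(np.abs(ref), axis=0) + float_info.epsilon)
if not np.isscalar(error_diff):
    error_diff = error_diff[0]

if np.any(error_diff > tolerance) or np.isnan(error_diff):
    print('ERROR difference: {:7.2g} > {:7.2g}'.format(error_diff, tolerance))
else:
    print('SUCCESS difference: {:7.2g} < {:7.2g}'.format(error_diff, tolerance))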
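Test IDs are split across ranks via parallelrange(rank, numprocs, len(list_ids)), whose result is then used as an inclusive index range (list_ids[i1:i2 + 1]). The ISSM implementation of that helper is not part of this changeset; the sketch below is only a hypothetical even split that is consistent with the call site:

# Hypothetical parallelrange consistent with the call site
# `i1, i2 = parallelrange(rank, numprocs, len(list_ids))` followed by
# `list_ids = list_ids[i1:i2 + 1]`. This is NOT the ISSM implementation,
# only an even split matching the inclusive-index usage (rank is 1-based,
# as suggested by the defaults rank=1, numprocs=1).
def parallelrange(rank, numprocs, numjobs):
    base = numjobs // numprocs
    extra = numjobs % numprocs
    # The first 'extra' ranks each take one additional job
    i1 = (rank - 1) * base + min(rank - 1, extra)
    i2 = i1 + base + (1 if rank <= extra else 0) - 1
    return i1, i2

# 10 tests split over 3 ranks: (0, 3), (4, 6), (7, 9)
for r in (1, 2, 3):
    print(r, parallelrange(r, 3, 10))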