Changeset 28129


Timestamp: 03/05/24 17:20:06
Author: jdquinn
Message: CHG: Corrections to how test suites are parsed under Python
Location: issm/trunk-jpl
Files: 8 edited

Legend: unchanged lines are shown without a prefix, removed lines are prefixed with -, added lines with +, and elided context is marked with …
  • issm/trunk-jpl/jenkins/mac-intel-dakota

    r28127 → r28129
      #
      MATLAB_NROPTIONS="'id',[IdFromString('Dakota')],'exclude',[234 235 418 420 444 445 2006]"
    - PYTHON_NROPTIONS="--include_name 'Dakota' --exclude 234 417 418 420 444 445 2006"
    + PYTHON_NROPTIONS="--id Dakota --exclude 234 417 418 420 444 445 2006"
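    For context: the NROPTIONS variables are the argument strings handed to the MATLAB and Python test drivers. Assuming the Jenkins wrapper (not part of this changeset) forwards $PYTHON_NROPTIONS to the Python test deck unchanged, the new string corresponds to an invocation along the lines of:

        ./runme.py --id Dakota --exclude 234 417 418 420 444 445 2006

    That is, the separate --include_name option is gone and --id now accepts test names as well as numeric IDs (see the GetIds.py and runme.py hunks below).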
  • issm/trunk-jpl/jenkins/mac-silicon-dakota

    r28079 → r28129
      #
      MATLAB_NROPTIONS="'id',[IdFromString('Dakota')],'exclude',[234 235 418 420 444 445 2006]"
    - PYTHON_NROPTIONS="--include_name 'Dakota' --exclude 234 417 418 420 444 445 2006"
    + PYTHON_NROPTIONS="--id Dakota --exclude 234 417 418 420 444 445 2006"
  • issm/trunk-jpl/jenkins/mac-silicon-python

    r28128 → r28129
      #
      MATLAB_NROPTIONS=""
    - PYTHON_NROPTIONS="--exclude [['Dakota'], [119 216 274 423 430 433 435 441:442 448 462:464 508 517 544 546 702:703 808]]"
    + PYTHON_NROPTIONS="--exclude Dakota 119 216 274 423 430 433 435 441:442 448 462:464 508 517 544 546 702:703 808"
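    The exclude list drops the MATLAB-style nested-bracket syntax: test names, plain IDs, and MATLAB-like ranges such as 441:442 are now passed as one flat, space-separated list and expanded by the reworked GetIds shown below. A shortened, hypothetical illustration (the real exclusion list above is longer):

        exclude_ids = GetIds(['Dakota', '119', '441:442'])
        # -> the IDs of every test whose name contains 'Dakota', plus 119, 441 and 442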
  • issm/trunk-jpl/jenkins/ross-debian_linux-dakota

    r28079 → r28129
      #
      MATLAB_NROPTIONS="'id',[IdFromString('Dakota')],'exclude',[2006]"
    - PYTHON_NROPTIONS="--include_name 'Dakota' --exclude 234 418 420 2006"
    + PYTHON_NROPTIONS="--id Dakota --exclude 234 418 420 2006"
  • issm/trunk-jpl/test/NightlyRun/GetIds.py

    r27787 → r28129
    - #!/usr/bin/env python
    + #!/usr/bin/env python3
    +
      from IdToName import *
      from IdFromString import *
    …


    - def GetIds(ids_names):
    -     """GETIDS - Output ids from a given array of IDs and test names
    + def GetIds(ids):
    +     """GetIds - Output IDs from a given list of IDs, strings representing
    +     MATLAB-like integer ranges, and test names

          The test names can be any string or substring present in the test's name
    …
          Usage:
              ids = GetIds(101)
    +         ids = GetIds(101:111)
              ids = GetIds('Dakota')
    -         ids = GetIds([101, 102...])
    -         ids = GetIds([\'Dakota\', \'Slc\'...])
    -         ids = GetIds([[101, 102...], [\'Dakota\', \'Slc\'...]])
    +         ids = GetIds([101, 102[, ...])
    +         ids = GetIds(['Dakota', 'Slc'[, ...]])
    +         ids = GetIds([101, 'Dakota', 'Slc'[, ...]])
          """

    -     ids = []
    +     output_ids = []

    -     # Non-list input: either an ID or a test name
    -     if type(ids_names) == str:
    -         ids = IdFromString(ids_names)
    -         if len(ids) == 0:
    -             # fail silently
    -             return []
    -         #raise RuntimeError('runme.py: GetIds.py: No tests with names matching "' + ids_names + '" were found. Note that name checking is case sensitive. Test names are in the first line of a given test eg: "Square" would include test101.py: "SquareShelfConstrainedStressSSA2d"')
    +     for id in ids:
    +         if id.isdigit():
    +             output_ids.append(int(id))
    +         else:
    +             if ':' in id: # MATLAB-like range of test ID's
    +                 id_range = id.split(':')
    +                 for i in range(int(id_range[0]), int(id_range[1]) + 1):
    +                     output_ids.append(i)
    +             else: # Test name
    +                 for id_from_string in IdFromString(id):
    +                     output_ids.append(id_from_string)

    -     # Non-list input: ID
    -     if type(ids_names) == int:
    -         ids = [ids_names]
    -         if len(ids) == 0:
    -             # fail silently
    -             return []
    -         #raise RuntimeError('runme.py: GetIds.py: No tests with ids matching "' + ids_names + '" were found. Check that there is a test file named "test' + str(ids_names) + '.py"')
    -
    -     # many inputs of either ids or test names
    -     if type(ids_names) == list and len(ids_names) > 0:
    -         # is everything a string or int?
    -         if np.array([type(i) == int for i in ids_names]).all():
    -             ids = ids_names
    -         elif np.array([type(i) == np.int64 for i in ids_names]).all():
    -             ids = ids_names
    -         elif np.array([type(i) == str for i in ids_names]).all():
    -             ids = np.concatenate([IdFromString(i) for i in ids_names])
    -             if len(ids) == 0:
    -                 raise RuntimeError('runme.py: GetIds.py: No tests with names matching "' + ids_names + '" were found. Note that name checking is case sensitive.')
    -
    -     # many inputs of both ids and test names
    -     # ids_names[0] -> ids_names by id
    -     # ids_names[1] -> ids_names by test name
    -     #
    -     # NOTE: ID inclusion/exclusion lists will always hit this condition
    -     #       because of the way their respective arguments are gathered at the
    -     #       end of __main__ in the call to function runme.
    -     if type(ids_names) == list and len(ids_names) == 2:
    -         if type(ids_names[0]) == list and len(ids_names[0]) > 0:
    -             ids_expanded = []
    -             for i in ids_names[0]:
    -                 # Handle case where list element follows MATLAB range syntax
    -                 if ':' in i:
    -                     i_range = i.split(':')
    -                     for j in range(int(i_range[0]), int(i_range[1]) + 1):
    -                         ids_expanded.append(j)
    -                 else:
    -                     ids_expanded.append(int(i))
    -             unique_ids = list(set(ids_expanded))
    -             ids += unique_ids
    -         if type(ids_names[1]) == list and len(ids_names[1]) > 0 and type(ids_names[1][0]) == str:
    -             ids = np.concatenate([ids, np.concatenate([IdFromString(i) for i in ids_names[1]])])
    -             if len(ids) == 0:
    -                 raise RuntimeError('runme.py: GetIds.py: No tests with names matching "' + ids_names + '" were found. Note that name checking is case sensitive.')
    -
    -     # no recognizable ids or id formats
    -     if np.size(ids) == 0 and not np.all(np.equal(ids_names, None)):
    -         raise RuntimeError('runme.py: GetIds.py: include and exclude options (-i/--id; -in/--include_name; -e/--exclude; -en/--exclude_name) options must follow GetIds usage format:\n' + GetIds.__doc__)
    -
    -     return np.array(ids).astype(int)
    +     return output_ids
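    A minimal usage sketch of the reworked parser, assuming it is fed the string tokens that argparse collects in runme.py (the IDs used here are illustrative; the IDs returned for a name token depend on which test files are present in the NightlyRun directory):

        from GetIds import GetIds

        ids = GetIds(['101', '105:107', 'Dakota'])
        # '101'     -> 101                        (plain integer ID)
        # '105:107' -> 105, 106, 107              (MATLAB-like range, inclusive at both ends)
        # 'Dakota'  -> every ID whose test name contains 'Dakota' (via IdFromString)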
  • issm/trunk-jpl/test/NightlyRun/IdFromString.py

    r27202 → r28129
    - #!/usr/bin/env python
    + #!/usr/bin/env python3
    +
    + from GetAvailableTestIds import *
      from IdToName import IdToName
      import os
    …

      def IdFromString(string, verbose=False):
    -     """
    -     IDFROMSTRING - output ids from a given string
    -         Usage:
    -             ids = IdFromString(string)
    -         Examples:
    -             ids = IdFromString('Parallel')
    -             ids = IdFromString('79North')
    -             ids = IdFromString(' * ')
    +     """IdFromString - output ids from a given string
    +
    +     Usage:
    +         ids = IdFromString(string)
    +
    +     Examples:
    +         ids = IdFromString('Dakota')
    +         ids = IdFromString('Slc')
    +         ids = IdFromString(' * ')
          """

    - #Check input
    +     # Check input
          if not isinstance(string, str):
              raise TypeError('IdFromString error message: input argument is not a string.')
    …
          string = string.replace('"', '')

    - #Get the test ids and names and scan for matches
    -
    +     # Get the test ids and names and scan for matches
          ids = []
          idnames = []
    -     flist = [f for f in os.listdir('.') if re.match('test[0-9]+.py', f)] # File name must follow the format "test<integer>.py"
    -     list_ids = [int(re.search(r'\d+',file.split('.')[0]).group()) for file in flist] # Retrieve test IDs
    -     for i in list_ids:
    +     available_test_ids = GetAvailableTestIds()
    +     for i in available_test_ids:
              name = IdToName(i)
              if (string == ' * ') or (name is not None and string in name):
    …
                  idnames.append(name)

    - #Return if no test found
    +     # Return if no test found
          if not ids:
              print("No test matches '%s'." % string)
              return ids

    - #Display names
    +     # Display names
          if verbose:
              idnames = [i for _, i in sorted(zip(ids, idnames), key=lambda pair: pair[0])]
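    IdFromString (and runme.py below) now delegate test discovery to GetAvailableTestIds, which is not among the files edited in this changeset. Judging from the inline code it replaces, it presumably scans the working directory for files named test<integer>.py; a rough sketch under that assumption, not the actual implementation:

        # Hypothetical sketch of GetAvailableTestIds, inferred from the removed inline code above
        import os
        import re

        def GetAvailableTestIds():
            # Test file names must follow the format "test<integer>.py"
            files = [f for f in os.listdir('.') if re.match(r'test[0-9]+\.py', f)]
            return [int(re.search(r'\d+', f.split('.')[0]).group()) for f in files]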
  • issm/trunk-jpl/test/NightlyRun/IdToName.py

    r24255 → r28129
    - #!/usr/bin/env python
    + #!/usr/bin/env python3


      def IdToName(test_id):
    +     """IdToName - return name of test
    +
    +     Usage:
    +         name = IdToName(test_id)
          """
    -         IDTONAME - return name of test
    -
    -             Usage:
    -                name = IdToName(test_id)
    -         """
          infile = open('test' + str(test_id) + '.py', 'r')
          file_text = infile.readline()
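    Per the comment removed from GetIds.py above, a test's name is taken from the first line of its test file, so (assuming test101.py still carries that header) a call looks like:

        name = IdToName(101)  # e.g. 'SquareShelfConstrainedStressSSA2d', read from the first line of test101.py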
  • issm/trunk-jpl/test/NightlyRun/runme.py

    r27782 → r28129
      #!/usr/bin/env python3
    -

      import argparse
    …
      try:
          from arch import archread
    - except:  # ISSM_DIR is not on path
    + except: # ISSM_DIR is not on path
          import devpath

      from arch import archread
      from arch import archwrite
    + from GetAvailableTestIds import *
      from GetIds import *
      from IdToName import IdToName
    …
          To run tests 101 and 102,

    -         ./runme.py -i [101, 102]
    +         ./runme.py -i 101 102

          Options:
    …
          exception'; see also jenkins/jenkins.sh). These should be counted as
          failures.
    +     - Add support for 'stoponerror'
          """

    …
          # Process options
          # Get benchmark {{{
    -     if benchmark not in ['all', 'nightly', 'validation', 'adolc', 'eismint', 'ismip', 'mesh', 'slc', 'thermal', 'tranforcing', 'qmu']:
    -         print(('runme warning: benchmark \'{}\' not supported, defaulting to test \'nightly\'.'.format(benchmark)))
    +     if benchmark not in ['all', 'nightly', 'ismip', 'eismint', 'thermal', 'mesh', 'validation', 'tranforcing', 'adolc', 'slc', 'qmu']:
    +         print('runme warning: benchmark \'{}\' not supported, defaulting to test \'nightly\'.'.format(benchmark))
              benchmark = 'nightly'
          # }}}
          # Get procedure {{{
          if procedure not in ['check', 'update', 'runFromNC']:
    -         print(('runme warning: procedure \'{}\' not supported, defaulting to test \'check\'.'.format(procedure)))
    +         print('runme warning: procedure \'{}\' not supported, defaulting to test \'check\'.'.format(procedure))
              procedure = 'check'
          # }}}
          # Get output {{{
          if output not in ['nightly', 'none']:
    -         print(('runme warning: output \'{}\' not supported, defaulting to test \'none\'.'.format(output)))
    +         print('runme warning: output \'{}\' not supported, defaulting to test \'none\'.'.format(output))
              output = 'none'
          # }}}
    …
              numprocs = 1
          # }}}
    -     # Get ids  {{{
    -     flist = [f for f in os.listdir('.') if re.match('test[0-9]+.py', f)] # File name must follow the format "test<integer>.py"
    -     list_ids = [int(re.search(r'\d+',f.split('.')[0]).group()) for f in flist] # Retrieve test IDs
    -     i1, i2 = parallelrange(rank, numprocs, len(list_ids))  # Get tests for this CPU only
    -     list_ids = list_ids[i1:i2 + 1]
    -     # Check if we are calling runme as a function with a single integer or string argument and, if so, convert to the proper protocol
    -     if isinstance(id, int):
    -         id = str(id)
    -     if isinstance(id, str):
    -         id = [[id], []]
    -     if len(id[0]) > 0 or len(id[1]) > 0:
    -         test_ids = set(GetIds(id)).intersection(set(list_ids))
    +     # Get available test IDs {{{
    +     all_ids = GetAvailableTestIds()
    +     i1, i2 = parallelrange(rank, numprocs, len(all_ids)) # Get tests for this CPU only
    +     all_ids = all_ids[i1:i2 + 1]
    +
    +     # Check value passed to id argument. If is a single integer or string, convert to a list
    +     requested_ids = GetIds(id)
    +
    +     if len(requested_ids) > 0:
    +         ids_to_run = set(requested_ids).intersection(set(all_ids))
              benchmark = None
          else:
    -         # If no tests are specifically provided, do them all
    -         test_ids = set(list_ids)
    -
    +         # If no tests are specifically requested, do them all
    +         ids_to_run = set(all_ids)
          # }}}
          # Get excluded tests {{{
          exclude_ids = GetIds(exclude)
    -     test_ids = test_ids.difference(exclude_ids)
    -
    +     ids_to_run = ids_to_run.difference(exclude_ids)
          # }}}
          if procedure == 'runFromNC':
              # bamg test
    -         test_ids = test_ids.difference([119, 514])
    +         ids_to_run = ids_to_run.difference([119, 514])
              # smbGEMB format is weird for the test
    -         test_ids = test_ids.difference([243, 244, 252, 253])
    +         ids_to_run = ids_to_run.difference([243, 244, 252, 253])
              # AMR runs where the index is missing from fieldnames
    -         test_ids = test_ids.difference([462, 463, 464, 465])
    +         ids_to_run = ids_to_run.difference([462, 463, 464, 465])
              # test247 solves for thermal and transient which makes it complex to check
    -         test_ids = test_ids.difference([247])
    +         ids_to_run = ids_to_run.difference([247])
              # test 902 is running two models with different stepping
    -         test_ids = test_ids.difference([902])
    +         ids_to_run = ids_to_run.difference([902])
              # size issue in 517 needs investigation
    -         test_ids = test_ids.difference([517])
    +         ids_to_run = ids_to_run.difference([517])

          # Process IDs according to benchmarks {{{
          if benchmark == 'nightly':
    -         test_ids = test_ids.intersection(set(range(1, 1000)))
    +         ids_to_run = ids_to_run.intersection(set(range(1, 1000)))
          elif benchmark == 'validation':
    -         test_ids = test_ids.intersection(set(range(1001, 2000)))
    +         ids_to_run = ids_to_run.intersection(set(range(1001, 2000)))
          elif benchmark == 'ismip':
    -         test_ids = test_ids.intersection(set(range(1101, 1200)))
    +         ids_to_run = ids_to_run.intersection(set(range(1101, 1200)))
          elif benchmark == 'eismint':
    -         test_ids = test_ids.intersection(set(range(1201, 1300)))
    +         ids_to_run = ids_to_run.intersection(set(range(1201, 1300)))
          elif benchmark == 'thermal':
    -         test_ids = test_ids.intersection(set(range(1301, 1400)))
    +         ids_to_run = ids_to_run.intersection(set(range(1301, 1400)))
          elif benchmark == 'mesh':
    -         test_ids = test_ids.intersection(set(range(1401, 1500)))
    +         ids_to_run = ids_to_run.intersection(set(range(1401, 1500)))
          elif benchmark == 'tranforcing':
    -         test_ids = test_ids.intersection(set(range(1501, 1503)))
    +         ids_to_run = ids_to_run.intersection(set(range(1501, 1503)))
          elif benchmark == 'referential':
    -         test_ids = test_ids.intersection(set(range(1601, 1603)))
    +         ids_to_run = ids_to_run.intersection(set(range(1601, 1603)))
          elif benchmark == 'slc':
    -         test_ids = test_ids.intersection(set(range(2001, 2500)))
    +         ids_to_run = ids_to_run.intersection(set(range(2001, 2500)))
          elif benchmark == 'adolc':
    -         test_ids = test_ids.intersection(set(range(3001, 3200)))
    +         ids_to_run = ids_to_run.intersection(set(range(3001, 3200)))
          elif benchmark == 'qmu':
    -         test_ids = test_ids.intersection(set((218, 234, 235, 417, 418, 420)).union(set(range(412, 414))))
    -     test_ids = list(test_ids)
    -     test_ids.sort()
    +         ids_to_run = ids_to_run.intersection(set((218, 234, 235, 417, 418, 420)).union(set(range(412, 414))))
    +     ids_to_run = list(ids_to_run)
    +     ids_to_run.sort()

          # }}}
    …
          # Loop over tests and launch sequence
          root = os.getcwd()
    -     errorcount = 0
    -     erroredtest_list = []
    -     for id in test_ids:
    +     error_count = 0
    +     resulted_in_error = []
    +     for id in ids_to_run:
              print(("----------------starting:{}-----------------------".format(id)))
              try:
    …
                                  else:
                                      print(('ERROR difference: {:7.2g} > {:7.2g} test id: {} field: {}{} is false in both loaded and computed results'.format(error_diff, tolerance, id, fieldname, index + 1)))
    -                                 errorcount += 1
    -                                 erroredtest_list.append(id)
    +                                 error_count += 1
    +                                 tests_with_error.append(id)
                              elif (np.any(error_diff > tolerance) or np.isnan(error_diff)):
                                  print(('ERROR   difference: {:7.2g} > {:7.2g} test id: {} test name: {} field: {}{}'.format(error_diff, tolerance, id, id_string, fieldname, index + 1)))
    -                             errorcount += 1
    -                             erroredtest_list.append(id)
    +                             error_count += 1
    +                             tests_with_error.append(id)
                              elif (np.any(load_diff > tolerance) or np.isnan(load_diff)):
                                  print(('SAVEERROR difference: {:7.2g} > {:7.2g} test id: {} test name: {} saved result : {}{}'.format(load_diff, tolerance, id, id_string, fieldname, index + 1)))
    -                             errorcount += 1
    -                             erroredtest_list.append(id)
    +                             error_count += 1
    +                             tests_with_error.append(id)
                              else:
                                  print(('SUCCESS difference: {:7.2g} < {:7.2g} test id: {} test name: {} field: {}{}'.format(error_diff, tolerance, id, id_string, fieldname, index + 1)))
    …

                              # Compare to archive
    -                         # Matlab uses base 1, so use base 1 in labels
    +                         # MATLAB uses base 1, so use base 1 in labels
                              archive = np.array(archread(archive_file, archive_name + '_field' + str(k + 1)))
                              # NOTE: str(np.array(None)) becomes 'None' but np.array(None) is never equal to None: it basically becomes a type of string in an array
    …
                              if (np.any(error_diff > tolerance) or np.isnan(error_diff)):
                                  print(('ERROR   difference: {:7.2g} > {:7.2g} test id: {} test name: {} field: {}'.format(error_diff, tolerance, id, id_string, fieldname)))
    -                             errorcount += 1
    -                             erroredtest_list.append(id)
    +                             error_count += 1
    +                             tests_with_error.append(id)
                              else:
                                  print(('SUCCESS difference: {:7.2g} < {:7.2g} test id: {} test name: {} field: {}'.format(error_diff, tolerance, id, id_string, fieldname)))
    …
              print(("----------------finished:{}-----------------------".format(id)))

    -     if errorcount > 0:
    -         print("{} errors were detected in test {}".format(errorcount, np.unique(erroredtest_list)))
    +     if error_count > 0:
    +         print("{} errors were detected in test {}".format(error_count, np.unique(tests_with_error)))
          return

    …

              parser = argparse.ArgumentParser(description='runme - test deck for ISSM nightly runs')
    -         parser.add_argument('-i', '--id', nargs='*', type=str, help='followed by the list of ids requested', default=[])
    -         parser.add_argument('-in', '--include_name', nargs='*', type=str, help='followed by the list of test names requested', default=[])
    -         parser.add_argument('-e', '--exclude', nargs='+', type=str, help='ids to be excluded from the test', default=[])
    -         parser.add_argument('-en', '--exclude_name', nargs='+', type=str, help='test names to be excluded from the test', default=[])
    +         parser.add_argument('-i', '--id', nargs='*', help='followed by the list of test ID\'s or names to run', default=[])
    +         parser.add_argument('-e', '--exclude', nargs='*', type=str, help='followed by the list of test ID\'s or names to exclude', default=[])
              parser.add_argument('-b', '--benchmark', help='nightly/ismip/eismint/thermal/mesh/...', default='nightly')
              parser.add_argument('-p', '--procedure', help='check/update', default='check')
    …
              args = parser.parse_args()

    -         md = runme([args.id, args.include_name], [args.exclude, args.exclude_name], args.benchmark, args.procedure, args.output, args.rank, args.numprocs)
    +         md = runme(args.id, args.exclude, args.benchmark, args.procedure, args.output, args.rank, args.numprocs)

              exit(md)
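    Taken together, -i/--id and -e/--exclude now each accept a flat list that may mix numeric IDs, MATLAB-like ranges, and test-name substrings, and the old -in/--include_name and -en/--exclude_name options are gone. Example invocations, mirroring the updated docstring and the Jenkins configurations above:

        ./runme.py -i 101 102
        ./runme.py --id Dakota --exclude 234 417 418 420 444 445 2006
        ./runme.py --exclude Dakota 119 441:442 702:703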