Index: /issm/trunk-jpl/jenkins/mac-intel-dakota
===================================================================
--- /issm/trunk-jpl/jenkins/mac-intel-dakota	(revision 28128)
+++ /issm/trunk-jpl/jenkins/mac-intel-dakota	(revision 28129)
@@ -99,3 +99,3 @@
 #
 MATLAB_NROPTIONS="'id',[IdFromString('Dakota')],'exclude',[234 235 418 420 444 445 2006]"
-PYTHON_NROPTIONS="--include_name 'Dakota' --exclude 234 417 418 420 444 445 2006"
+PYTHON_NROPTIONS="--id Dakota --exclude 234 417 418 420 444 445 2006"
Index: /issm/trunk-jpl/jenkins/mac-silicon-dakota
===================================================================
--- /issm/trunk-jpl/jenkins/mac-silicon-dakota	(revision 28128)
+++ /issm/trunk-jpl/jenkins/mac-silicon-dakota	(revision 28129)
@@ -94,3 +94,3 @@
 #
 MATLAB_NROPTIONS="'id',[IdFromString('Dakota')],'exclude',[234 235 418 420 444 445 2006]"
-PYTHON_NROPTIONS="--include_name 'Dakota' --exclude 234 417 418 420 444 445 2006"
+PYTHON_NROPTIONS="--id Dakota --exclude 234 417 418 420 444 445 2006"
Index: /issm/trunk-jpl/jenkins/mac-silicon-python
===================================================================
--- /issm/trunk-jpl/jenkins/mac-silicon-python	(revision 28128)
+++ /issm/trunk-jpl/jenkins/mac-silicon-python	(revision 28129)
@@ -69,4 +69,4 @@
 #
 MATLAB_NROPTIONS=""
-PYTHON_NROPTIONS="--exclude [['Dakota'], [119 216 274 423 430 433 435 441:442 448 462:464 508 517 544 546 702:703 808]]"
+PYTHON_NROPTIONS="--exclude Dakota 119 216 274 423 430 433 435 441:442 448 462:464 508 517 544 546 702:703 808"
 
Index: /issm/trunk-jpl/jenkins/ross-debian_linux-dakota
===================================================================
--- /issm/trunk-jpl/jenkins/ross-debian_linux-dakota	(revision 28128)
+++ /issm/trunk-jpl/jenkins/ross-debian_linux-dakota	(revision 28129)
@@ -92,3 +92,3 @@
 #
 MATLAB_NROPTIONS="'id',[IdFromString('Dakota')],'exclude',[2006]"
-PYTHON_NROPTIONS="--include_name 'Dakota' --exclude 234 418 420 2006"
+PYTHON_NROPTIONS="--id Dakota --exclude 234 418 420 2006"
Index: /issm/trunk-jpl/test/NightlyRun/GetIds.py
===================================================================
--- /issm/trunk-jpl/test/NightlyRun/GetIds.py	(revision 28128)
+++ /issm/trunk-jpl/test/NightlyRun/GetIds.py	(revision 28129)
@@ -1,3 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
+
 from IdToName import *
 from IdFromString import *
@@ -5,6 +6,7 @@
 
 
-def GetIds(ids_names):
-    """GETIDS - Output ids from a given array of IDs and test names
+def GetIds(ids):
+    """GetIds - Output IDs from a given list of IDs, strings representing 
+    MATLAB-like integer ranges, and test names
 
     The test names can be any string or substring present in the test's name 
@@ -13,68 +15,24 @@
     Usage:
         ids = GetIds(101)
+        ids = GetIds('101:111')
         ids = GetIds('Dakota')
-        ids = GetIds([101, 102...])
-        ids = GetIds([\'Dakota\', \'Slc\'...])
-        ids = GetIds([[101, 102...], [\'Dakota\', \'Slc\'...]])
+        ids = GetIds([101, 102[, ...]])
+        ids = GetIds(['Dakota', 'Slc'[, ...]])
+        ids = GetIds([101, 'Dakota', 'Slc'[, ...]])
     """
 
-    ids = []
+    output_ids = []
 
-    # Non-list input: either an ID or a test name
-    if type(ids_names) == str:
-        ids = IdFromString(ids_names)
-        if len(ids) == 0:
-            # fail silently
-            return []
-        #raise RuntimeError('runme.py: GetIds.py: No tests with names matching "' + ids_names + '" were found. Note that name checking is case sensitive. Test names are in the first line of a given test eg: "Square" would include test101.py: "SquareShelfConstrainedStressSSA2d"')
+    for id in (ids if isinstance(ids, list) else [ids]):
+        if isinstance(id, int) or id.isdigit():
+            output_ids.append(int(id))
+        else:
+            if ':' in id: # MATLAB-like range of test ID's
+                id_range = id.split(':')
+                for i in range(int(id_range[0]), int(id_range[1]) + 1):
+                    output_ids.append(i)
+            else: # Test name
+                for id_from_string in IdFromString(id):
+                    output_ids.append(id_from_string)
 
-    # Non-list input: ID
-    if type(ids_names) == int:
-        ids = [ids_names]
-        if len(ids) == 0:
-            # fail silently
-            return []
-        #raise RuntimeError('runme.py: GetIds.py: No tests with ids matching "' + ids_names + '" were found. Check that there is a test file named "test' + str(ids_names) + '.py"')
-
-    # many inputs of either ids or test names
-    if type(ids_names) == list and len(ids_names) > 0:
-        # is everything a string or int?
-        if np.array([type(i) == int for i in ids_names]).all():
-            ids = ids_names
-        elif np.array([type(i) == np.int64 for i in ids_names]).all():
-            ids = ids_names
-        elif np.array([type(i) == str for i in ids_names]).all():
-            ids = np.concatenate([IdFromString(i) for i in ids_names])
-            if len(ids) == 0:
-                raise RuntimeError('runme.py: GetIds.py: No tests with names matching "' + ids_names + '" were found. Note that name checking is case sensitive.')
-
-    # many inputs of both ids and test names
-    # ids_names[0] -> ids_names by id
-    # ids_names[1] -> ids_names by test name
-    #
-    # NOTE: ID inclusion/exclusion lists will always hit this condition 
-    #       because of the way their respective arguments are gathered at the 
-    #       end of __main__ in the call to function runme.
-    if type(ids_names) == list and len(ids_names) == 2:
-        if type(ids_names[0]) == list and len(ids_names[0]) > 0:
-            ids_expanded = []
-            for i in ids_names[0]:
-                # Handle case where list element follows MATLAB range syntax
-                if ':' in i:
-                    i_range = i.split(':')
-                    for j in range(int(i_range[0]), int(i_range[1]) + 1):
-                        ids_expanded.append(j)
-                else:
-                    ids_expanded.append(int(i))
-            unique_ids = list(set(ids_expanded))
-            ids += unique_ids
-        if type(ids_names[1]) == list and len(ids_names[1]) > 0 and type(ids_names[1][0]) == str:
-            ids = np.concatenate([ids, np.concatenate([IdFromString(i) for i in ids_names[1]])])
-            if len(ids) == 0:
-                raise RuntimeError('runme.py: GetIds.py: No tests with names matching "' + ids_names + '" were found. Note that name checking is case sensitive.')
-
-    # no recognizable ids or id formats
-    if np.size(ids) == 0 and not np.all(np.equal(ids_names, None)):
-        raise RuntimeError('runme.py: GetIds.py: include and exclude options (-i/--id; -in/--include_name; -e/--exclude; -en/--exclude_name) options must follow GetIds usage format:\n' + GetIds.__doc__)
-
-    return np.array(ids).astype(int)
+    return output_ids
Index: /issm/trunk-jpl/test/NightlyRun/IdFromString.py
===================================================================
--- /issm/trunk-jpl/test/NightlyRun/IdFromString.py	(revision 28128)
+++ /issm/trunk-jpl/test/NightlyRun/IdFromString.py	(revision 28129)
@@ -1,3 +1,5 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
+
+from GetAvailableTestIds import *
 from IdToName import IdToName
 import os
@@ -8,15 +10,16 @@
 
 def IdFromString(string, verbose=False):
-    """
-    IDFROMSTRING - output ids from a given string
-        Usage:
-            ids = IdFromString(string)
-        Examples:
-            ids = IdFromString('Parallel')
-            ids = IdFromString('79North')
-            ids = IdFromString(' * ')
+    """IdFromString - output ids from a given string
+
+    Usage:
+        ids = IdFromString(string)
+    
+    Examples:
+        ids = IdFromString('Dakota')
+        ids = IdFromString('Slc')
+        ids = IdFromString(' * ')
     """
 
-#Check input
+    # Check input
     if not isinstance(string, str):
         raise TypeError('IdFromString error message: input argument is not a string.')
@@ -24,11 +27,9 @@
     string = string.replace('"', '')
 
-#Get the test ids and names and scan for matches
-
+    # Get the test ids and names and scan for matches
     ids = []
     idnames = []
-    flist = [f for f in os.listdir('.') if re.match('test[0-9]+.py', f)] # File name must follow the format "test<integer>.py"
-    list_ids = [int(re.search(r'\d+',file.split('.')[0]).group()) for file in flist] # Retrieve test IDs
-    for i in list_ids:
+    available_test_ids = GetAvailableTestIds()
+    for i in available_test_ids:
         name = IdToName(i)
         if (string == ' * ') or (name is not None and string in name):
@@ -36,10 +37,10 @@
             idnames.append(name)
 
-#Return if no test found
+    # Return if no test found
     if not ids:
         print("No test matches '%s'." % string)
         return ids
 
-#Display names
+    # Display names
     if verbose:
         idnames = [i for _, i in sorted(zip(ids, idnames), key=lambda pair: pair[0])]
Index: /issm/trunk-jpl/test/NightlyRun/IdToName.py
===================================================================
--- /issm/trunk-jpl/test/NightlyRun/IdToName.py	(revision 28128)
+++ /issm/trunk-jpl/test/NightlyRun/IdToName.py	(revision 28129)
@@ -1,12 +1,11 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 
 def IdToName(test_id):
+    """IdToName - return name of test
+
+    Usage:
+        name = IdToName(test_id)
     """
-        IDTONAME - return name of test
-
-            Usage:
-               name = IdToName(test_id)
-        """
     infile = open('test' + str(test_id) + '.py', 'r')
     file_text = infile.readline()
Index: /issm/trunk-jpl/test/NightlyRun/runme.py
===================================================================
--- /issm/trunk-jpl/test/NightlyRun/runme.py	(revision 28128)
+++ /issm/trunk-jpl/test/NightlyRun/runme.py	(revision 28129)
@@ -1,4 +1,3 @@
 #!/usr/bin/env python3
-
 
 import argparse
@@ -21,9 +20,10 @@
 try:
     from arch import archread
-except:  # ISSM_DIR is not on path
+except: # ISSM_DIR is not on path
     import devpath
 
 from arch import archread
 from arch import archwrite
+from GetAvailableTestIds import *
 from GetIds import *
 from IdToName import IdToName
@@ -44,5 +44,5 @@
     To run tests 101 and 102,
 
-        ./runme.py -i [101, 102]
+        ./runme.py -i 101 102
 
     Options:
@@ -93,4 +93,5 @@
     exception'; see also jenkins/jenkins.sh). These should be counted as
     failures.
+    - Add support for 'stoponerror'
     """
 
@@ -100,16 +101,16 @@
     # Process options
     # Get benchmark {{{
-    if benchmark not in ['all', 'nightly', 'validation', 'adolc', 'eismint', 'ismip', 'mesh', 'slc', 'thermal', 'tranforcing', 'qmu']:
-        print(('runme warning: benchmark \'{}\' not supported, defaulting to test \'nightly\'.'.format(benchmark)))
+    if benchmark not in ['all', 'nightly', 'ismip', 'eismint', 'thermal', 'mesh', 'validation', 'tranforcing', 'adolc', 'slc', 'qmu']:
+        print('runme warning: benchmark \'{}\' not supported, defaulting to test \'nightly\'.'.format(benchmark))
         benchmark = 'nightly'
     # }}}
     # Get procedure {{{
     if procedure not in ['check', 'update', 'runFromNC']:
-        print(('runme warning: procedure \'{}\' not supported, defaulting to test \'check\'.'.format(procedure)))
+        print('runme warning: procedure \'{}\' not supported, defaulting to test \'check\'.'.format(procedure))
         procedure = 'check'
     # }}}
     # Get output {{{
     if output not in ['nightly', 'none']:
-        print(('runme warning: output \'{}\' not supported, defaulting to test \'none\'.'.format(output)))
+        print('runme warning: output \'{}\' not supported, defaulting to test \'none\'.'.format(output))
         output = 'none'
     # }}}
@@ -118,66 +119,62 @@
         numprocs = 1
     # }}}
-    # Get ids  {{{
-    flist = [f for f in os.listdir('.') if re.match('test[0-9]+.py', f)] # File name must follow the format "test<integer>.py"
-    list_ids = [int(re.search(r'\d+',f.split('.')[0]).group()) for f in flist] # Retrieve test IDs
-    i1, i2 = parallelrange(rank, numprocs, len(list_ids))  # Get tests for this CPU only
-    list_ids = list_ids[i1:i2 + 1]
-    # Check if we are calling runme as a function with a single integer or string argument and, if so, convert to the proper protocol
-    if isinstance(id, int):
-        id = str(id)
-    if isinstance(id, str):
-        id = [[id], []]
-    if len(id[0]) > 0 or len(id[1]) > 0:
-        test_ids = set(GetIds(id)).intersection(set(list_ids))
+    # Get available test IDs {{{
+    all_ids = GetAvailableTestIds()
+    i1, i2 = parallelrange(rank, numprocs, len(all_ids)) # Get tests for this CPU only
+    all_ids = all_ids[i1:i2 + 1]
+
+    # Check value passed to id argument. If it is a single integer or string, convert to a list
+    requested_ids = GetIds(id)
+
+    if len(requested_ids) > 0:
+        ids_to_run = set(requested_ids).intersection(set(all_ids))
         benchmark = None
     else:
-        # If no tests are specifically provided, do them all
-        test_ids = set(list_ids)
-
+        # If no tests are specifically requested, do them all
+        ids_to_run = set(all_ids)
     # }}}
     # Get excluded tests {{{
     exclude_ids = GetIds(exclude)
-    test_ids = test_ids.difference(exclude_ids)
-
+    ids_to_run = ids_to_run.difference(exclude_ids)
     # }}}
     if procedure == 'runFromNC':
         # bamg test
-        test_ids = test_ids.difference([119, 514])
+        ids_to_run = ids_to_run.difference([119, 514])
         # smbGEMB format is weird for the test
-        test_ids = test_ids.difference([243, 244, 252, 253])
+        ids_to_run = ids_to_run.difference([243, 244, 252, 253])
         # AMR runs where the index is missing from fieldnames
-        test_ids = test_ids.difference([462, 463, 464, 465])
+        ids_to_run = ids_to_run.difference([462, 463, 464, 465])
         # test247 solves for thermal and transient which makes it complex to check
-        test_ids = test_ids.difference([247])
+        ids_to_run = ids_to_run.difference([247])
         # test 902 is running two models with different stepping
-        test_ids = test_ids.difference([902])
+        ids_to_run = ids_to_run.difference([902])
         # size issue in 517 needs investigation
-        test_ids = test_ids.difference([517])
+        ids_to_run = ids_to_run.difference([517])
 
     # Process IDs according to benchmarks {{{
     if benchmark == 'nightly':
-        test_ids = test_ids.intersection(set(range(1, 1000)))
+        ids_to_run = ids_to_run.intersection(set(range(1, 1000)))
     elif benchmark == 'validation':
-        test_ids = test_ids.intersection(set(range(1001, 2000)))
+        ids_to_run = ids_to_run.intersection(set(range(1001, 2000)))
     elif benchmark == 'ismip':
-        test_ids = test_ids.intersection(set(range(1101, 1200)))
+        ids_to_run = ids_to_run.intersection(set(range(1101, 1200)))
     elif benchmark == 'eismint':
-        test_ids = test_ids.intersection(set(range(1201, 1300)))
+        ids_to_run = ids_to_run.intersection(set(range(1201, 1300)))
     elif benchmark == 'thermal':
-        test_ids = test_ids.intersection(set(range(1301, 1400)))
+        ids_to_run = ids_to_run.intersection(set(range(1301, 1400)))
     elif benchmark == 'mesh':
-        test_ids = test_ids.intersection(set(range(1401, 1500)))
+        ids_to_run = ids_to_run.intersection(set(range(1401, 1500)))
     elif benchmark == 'tranforcing':
-        test_ids = test_ids.intersection(set(range(1501, 1503)))
+        ids_to_run = ids_to_run.intersection(set(range(1501, 1503)))
     elif benchmark == 'referential':
-        test_ids = test_ids.intersection(set(range(1601, 1603)))
+        ids_to_run = ids_to_run.intersection(set(range(1601, 1603)))
     elif benchmark == 'slc':
-        test_ids = test_ids.intersection(set(range(2001, 2500)))
+        ids_to_run = ids_to_run.intersection(set(range(2001, 2500)))
     elif benchmark == 'adolc':
-        test_ids = test_ids.intersection(set(range(3001, 3200)))
+        ids_to_run = ids_to_run.intersection(set(range(3001, 3200)))
     elif benchmark == 'qmu':
-        test_ids = test_ids.intersection(set((218, 234, 235, 417, 418, 420)).union(set(range(412, 414))))
-    test_ids = list(test_ids)
-    test_ids.sort()
+        ids_to_run = ids_to_run.intersection(set((218, 234, 235, 417, 418, 420)).union(set(range(412, 414))))
+    ids_to_run = list(ids_to_run)
+    ids_to_run.sort()
 
     # }}}
@@ -185,7 +182,7 @@
     # Loop over tests and launch sequence
     root = os.getcwd()
-    errorcount = 0
-    erroredtest_list = []
-    for id in test_ids:
+    error_count = 0
+    tests_with_error = []
+    for id in ids_to_run:
         print(("----------------starting:{}-----------------------".format(id)))
         try:
@@ -320,14 +317,14 @@
                             else:
                                 print(('ERROR difference: {:7.2g} > {:7.2g} test id: {} field: {}{} is false in both loaded and computed results'.format(error_diff, tolerance, id, fieldname, index + 1)))
-                                errorcount += 1
-                                erroredtest_list.append(id)
+                                error_count += 1
+                                tests_with_error.append(id)
                         elif (np.any(error_diff > tolerance) or np.isnan(error_diff)):
                             print(('ERROR   difference: {:7.2g} > {:7.2g} test id: {} test name: {} field: {}{}'.format(error_diff, tolerance, id, id_string, fieldname, index + 1)))
-                            errorcount += 1
-                            erroredtest_list.append(id)
+                            error_count += 1
+                            tests_with_error.append(id)
                         elif (np.any(load_diff > tolerance) or np.isnan(load_diff)):
                             print(('SAVEERROR difference: {:7.2g} > {:7.2g} test id: {} test name: {} saved result : {}{}'.format(load_diff, tolerance, id, id_string, fieldname, index + 1)))
-                            errorcount += 1
-                            erroredtest_list.append(id)
+                            error_count += 1
+                            tests_with_error.append(id)
                         else:
                             print(('SUCCESS difference: {:7.2g} < {:7.2g} test id: {} test name: {} field: {}{}'.format(error_diff, tolerance, id, id_string, fieldname, index + 1)))
@@ -366,5 +363,5 @@
 
                         # Compare to archive
-                        # Matlab uses base 1, so use base 1 in labels
+                        # MATLAB uses base 1, so use base 1 in labels
                         archive = np.array(archread(archive_file, archive_name + '_field' + str(k + 1)))
                         # NOTE: str(np.array(None)) becomes 'None' but np.array(None) is never equal to None: it basically becomes a type of string in an array
@@ -383,6 +380,6 @@
                         if (np.any(error_diff > tolerance) or np.isnan(error_diff)):
                             print(('ERROR   difference: {:7.2g} > {:7.2g} test id: {} test name: {} field: {}'.format(error_diff, tolerance, id, id_string, fieldname)))
-                            errorcount += 1
-                            erroredtest_list.append(id)
+                            error_count += 1
+                            tests_with_error.append(id)
                         else:
                             print(('SUCCESS difference: {:7.2g} < {:7.2g} test id: {} test name: {} field: {}'.format(error_diff, tolerance, id, id_string, fieldname)))
@@ -416,6 +413,6 @@
         print(("----------------finished:{}-----------------------".format(id)))
 
-    if errorcount > 0:
-        print("{} errors were detected in test {}".format(errorcount, np.unique(erroredtest_list)))
+    if error_count > 0:
+        print("{} errors were detected in test {}".format(error_count, np.unique(tests_with_error)))
     return
 
@@ -432,8 +429,6 @@
 
         parser = argparse.ArgumentParser(description='runme - test deck for ISSM nightly runs')
-        parser.add_argument('-i', '--id', nargs='*', type=str, help='followed by the list of ids requested', default=[])
-        parser.add_argument('-in', '--include_name', nargs='*', type=str, help='followed by the list of test names requested', default=[])
-        parser.add_argument('-e', '--exclude', nargs='+', type=str, help='ids to be excluded from the test', default=[])
-        parser.add_argument('-en', '--exclude_name', nargs='+', type=str, help='test names to be excluded from the test', default=[])
+        parser.add_argument('-i', '--id', nargs='*', help='followed by the list of test IDs or names to run', default=[])
+        parser.add_argument('-e', '--exclude', nargs='*', type=str, help='followed by the list of test IDs or names to exclude', default=[])
         parser.add_argument('-b', '--benchmark', help='nightly/ismip/eismint/thermal/mesh/...', default='nightly')
         parser.add_argument('-p', '--procedure', help='check/update', default='check')
@@ -443,5 +438,5 @@
         args = parser.parse_args()
 
-        md = runme([args.id, args.include_name], [args.exclude, args.exclude_name], args.benchmark, args.procedure, args.output, args.rank, args.numprocs)
+        md = runme(args.id, args.exclude, args.benchmark, args.procedure, args.output, args.rank, args.numprocs)
 
         exit(md)
