Index: /issm/trunk-jpl/src/m/mesh/bamg.m
===================================================================
--- /issm/trunk-jpl/src/m/mesh/bamg.m	(revision 28157)
+++ /issm/trunk-jpl/src/m/mesh/bamg.m	(revision 28158)
@@ -29,5 +29,5 @@
 %   - NoBoundaryRefinementAllBoundaries: do not refine boundary, only follow contour provided (default 0)
 %   - maxnbv :            maximum number of vertices used to allocate memory (default is 10^6)
-%   - maxsubdiv :         maximum subdivision of exisiting elements (default is 10)
+%   - maxsubdiv :         maximum subdivision of existing elements (default is 10)
 %   - metric :            matrix (numberofnodes x 3) used as a metric
 %   - Metrictype :        0 -> absolute error          c/(err coeff^2) * Abs(H)        (default)
@@ -58,5 +58,5 @@
 options=deleteduplicates(options,1);
 
-%initialize the structures required as input of Bamg
+%initialize the structures required as input of BAMG
 bamg_options=struct();
 bamg_geometry=bamggeom();
@@ -65,5 +65,5 @@
 subdomain_ref = 1;
 hole_ref = 1;
-% Bamg Geometry parameters {{{
+% BAMG Geometry parameters {{{
 if exist(options,'domain'),
 
@@ -453,5 +453,5 @@
 end
 %}}}
-% Bamg Mesh parameters {{{
+% BAMG Mesh parameters {{{
 if (~exist(options,'domain') & md.mesh.numberofvertices~=0 & strcmp(elementtype(md.mesh),'Tria')),
 
@@ -468,9 +468,9 @@
 end
 %}}}
-% Bamg Options {{{
+% BAMG Options {{{
 bamg_options.Crack=getfieldvalue(options,'Crack',0);
-bamg_options.anisomax=getfieldvalue(options,'anisomax',10.^30);
+bamg_options.anisomax=getfieldvalue(options,'anisomax',1e30);
 bamg_options.coeff=getfieldvalue(options,'coeff',1.);
-bamg_options.cutoff=getfieldvalue(options,'cutoff',10.^-5);
+bamg_options.cutoff=getfieldvalue(options,'cutoff',1e-5);
 bamg_options.err=getfieldvalue(options,'err',0.01);
 bamg_options.errg=getfieldvalue(options,'errg',0.1);
@@ -478,11 +478,11 @@
 bamg_options.gradation=getfieldvalue(options,'gradation',1.5);
 bamg_options.Hessiantype=getfieldvalue(options,'Hessiantype',0);
-bamg_options.hmin=getfieldvalue(options,'hmin',10.^-100);
-bamg_options.hmax=getfieldvalue(options,'hmax',10.^100);
+bamg_options.hmin=getfieldvalue(options,'hmin',1e-100);
+bamg_options.hmax=getfieldvalue(options,'hmax',1e100);
 bamg_options.hminVertices=getfieldvalue(options,'hminVertices',[]);
 bamg_options.hmaxVertices=getfieldvalue(options,'hmaxVertices',[]);
 bamg_options.hVertices=getfieldvalue(options,'hVertices',[]);
 bamg_options.KeepVertices=getfieldvalue(options,'KeepVertices',1);
-bamg_options.maxnbv=getfieldvalue(options,'maxnbv',10^6);
+bamg_options.maxnbv=getfieldvalue(options,'maxnbv',1e6);
 bamg_options.maxsubdiv=getfieldvalue(options,'maxsubdiv',10.);
 bamg_options.metric=getfieldvalue(options,'metric',[]);
@@ -496,5 +496,5 @@
 %}}}
 
-%call Bamg
+%call BAMG
 [bamgmesh_out bamggeom_out]=BamgMesher(bamg_mesh,bamg_geometry,bamg_options);
 
@@ -550,5 +550,5 @@
 end
 
-%Bamg private fields
+%BAMG private fields
 md.private.bamg=struct();
 md.private.bamg.mesh=bamgmesh(bamgmesh_out);
Index: /issm/trunk-jpl/src/m/mesh/bamg.py
===================================================================
--- /issm/trunk-jpl/src/m/mesh/bamg.py	(revision 28157)
+++ /issm/trunk-jpl/src/m/mesh/bamg.py	(revision 28158)
@@ -19,97 +19,49 @@
 
 def bamg(md, *args):
-    """BAMG - mesh generation
+    """bamg - mesh generation
 
     Available options (for more details see ISSM website http://issm.jpl.nasa.gov/):
 
-    - domain :                              followed by an ARGUS file that
-                                            prescribes the domain outline
-    - holes :                               followed by an ARGUS file that
-                                            prescribes the holes
-    - subdomains :                          followed by an ARGUS file that
-                                            prescribes the list of subdomains
-                                            (that need to be inside domain)
-
-    - hmin :                                minimum edge length (default is
-                                            1.0e-100)
-    - hmax :                                maximum edge length (default is
-                                            1.0e100)
-    - hVertices :                           imposed edge length for each vertex
-                                            (geometry or mesh)
-    - hminVertices :                        minimum edge length for each vertex
-                                            (mesh)
-    - hmaxVertices :                        maximum edge length for each vertex
-                                            (mesh)
-
-    - anisomax :                            maximum ratio between the smallest
-                                            and largest edges (default is
-                                            1.0e30)
-    - coeff :                               coefficient applied to the metric
-                                            (2 -> twice as many elements,
-                                            default is 1)
-    - cutoff :                              scalar used to compute the metric
-                                            when metric type 2 or 3 are applied
-    - err :                                 error used to generate the metric
-                                            from a field
+    - domain :                              followed by an ARGUS file that prescribes the domain outline
+    - holes :                               followed by an ARGUS file that prescribes the holes
+    - subdomains :                          followed by an ARGUS file that prescribes the list of subdomains (that need to be inside domain)
+
+    - hmin :                                minimum edge length (default is 1.0e-100)
+    - hmax :                                maximum edge length (default is 1.0e100)
+    - hVertices :                           imposed edge length for each vertex (geometry or mesh)
+    - hminVertices :                        minimum edge length for each vertex (mesh)
+    - hmaxVertices :                        maximum edge length for each vertex (mesh)
+
+    - anisomax :                            maximum ratio between the smallest and largest edges (default is 1.0e30)
+    - coeff :                               coefficient applied to the metric (2 -> twice as many elements, default is 1)
+    - cutoff :                              scalar used to compute the metric when metric type 2 or 3 are applied
+    - err :                                 error used to generate the metric from a field
     - errg :                                geometric error (default is 0.1)
-    - field :                               field of the model that will be
-                                            used to compute the metric to apply
-                                            several fields, use one column per
-                                            field
-    - gradation :                           maximum ratio between two adjacent
-                                            edges
-    - Hessiantype :                         0 -> use double P2 projection
-                                            (default)
+    - field :                               field of the model that will be used to compute the metric to apply several fields, use one column per field
+    - gradation :                           maximum ratio between two adjacent edges
+    - Hessiantype :                         0 -> use double P2 projection (default)
                                             1 -> use Green formula
-    - KeepVertices :                        try to keep initial vertices when
-                                            adaptation is done on an existing
-                                            mesh (default 1)
-    - NoBoundaryRefinement :                do not refine boundary, only follow
-                                            contour provided (default 0). Allow
-                                            subdomain boundary refinement
-                                            though
-    - NoBoundaryRefinementAllBoundaries :   do not refine boundary, only follow
-                                            contour provided (default 0)
-    - maxnbv :                              maximum number of vertices used to
-                                            allocate memory (default is 1.0e6)
-    - maxsubdiv :                           maximum subdivision of exisiting
-                                            elements (default is 10)
-    - metric :                              matrix (numberofnodes x 3) used as
-                                            a metric
-    - Metrictype :                          1 -> absolute error
-                                            c/(err coeff^2) * Abs(H) (default)
-                                            2 -> relative error
-                                            c / (err coeff^2) * Abs(H) /
-                                            max(s, cutoff * max(s))
-                                            3 -> rescaled absolute error
-                                            c / (err coeff^2) * Abs(H) /
-                                            (smax - smin)
-    - nbjacoby :                            correction used by Hessiantype = 1
-                                            (default is 1)
-    - nbsmooth :                            number of metric smoothing
-                                            procedure (default is 3)
-    - omega :                               relaxation parameter of the
-                                            smoothing procedure (default is
-                                            1.8)
-    - power :                               power applied to the metric
-                                            (default is 1)
-    - splitcorners :                        split triangles which have 3
-                                            vertices on the outline (default is
-                                            1)
+    - KeepVertices :                        try to keep initial vertices when adaptation is done on an existing mesh (default 1)
+    - NoBoundaryRefinement :                do not refine boundary, only follow contour provided (default 0). Allow subdomain boundary refinement though
+    - NoBoundaryRefinementAllBoundaries :   do not refine boundary, only follow contour provided (default 0)
+    - maxnbv :                              maximum number of vertices used to allocate memory (default is 1.0e6)
+    - maxsubdiv :                           maximum subdivision of existing elements (default is 10)
+    - metric :                              matrix (numberofnodes x 3) used as a metric
+    - Metrictype :                          1 -> absolute error          c/(err coeff^2) * Abs(H)        (default)
+                                            2 -> relative error          c / (err coeff^2) * Abs(H) / max(s, cutoff * max(s))
+                                            3 -> rescaled absolute error c / (err coeff^2) * Abs(H) / (smax - smin)
+    - nbjacoby :                            correction used by Hessiantype = 1 (default is 1)
+    - nbsmooth :                            number of metric smoothing procedure (default is 3)
+    - omega :                               relaxation parameter of the smoothing procedure (default is 1.8)
+    - power :                               power applied to the metric (default is 1)
+    - splitcorners :                        split triangles which have 3 vertices on the outline (default is 1)
     - verbose :                             level of verbosity (default is 1)
 
-    - rifts :                               followed by an ARGUS file that
-                                            prescribes the rifts
-    - toltip :                              tolerance to move tip on an
-                                            existing point of the domain
-                                            outline
-    - tracks :                              followed by an ARGUS file that
-                                            prescribes the tracks that the mesh
-                                            will stick to
-    - RequiredVertices :                    mesh vertices that are required.
-                                            [x, y, ref]; ref is optional
-    - tol :                                 if the distance between 2 points of
-                                            the domain outline is less than
-                                            tol, they will be merged
+    - vertical :                            is this a 2d vertical mesh (flowband, default is 0)
+    - rifts :                               followed by an ARGUS file that prescribes the rifts
+    - toltip :                              tolerance to move tip on an existing point of the domain outline
+    - tracks :                              followed by an ARGUS file that prescribes the tracks that the mesh will stick to
+    - RequiredVertices :                    mesh vertices that are required. [x, y, ref]; ref is optional
+    - tol :                                 if the distance between 2 points of the domain outline is less than tol, they will be merged
 
     Examples:
@@ -126,5 +78,5 @@
     #options = deleteduplicates(options, 1)
 
-    # Initialize the structures required as input of Bamg
+    # Initialize the structures required as input of BAMG
     bamg_options = OrderedDict()
     bamg_geometry = bamggeom()
@@ -134,5 +86,5 @@
     hole_ref = 1
 
-    # Bamg Geometry parameters {{{
+    # BAMG Geometry parameters {{{
     if options.exist('domain'):
         #Check that file exists
@@ -532,8 +484,8 @@
         bamg_geometry = bamggeom(md.private.bamg['geometry'].__dict__)
     else:
-        #do nothing...
+        # Do nothing...
         pass
     # }}}
-    # Bamg mesh parameters {{{
+    # BAMG mesh parameters {{{
     if not options.exist('domain') and md.mesh.numberofvertices and md.mesh.elementtype() == 'Tria':
         if isinstance(md.private.bamg, dict) and 'mesh' in md.private.bamg:
@@ -548,23 +500,23 @@
 
         if isinstance(md.rifts.riftstruct, dict):
-            raise TypeError("bamg error message: rifts not supported yet. Do meshprocessrift AFTER bamg")
+            raise TypeError('bamg error message: rifts not supported yet. Do meshprocessrift AFTER bamg')
     # }}}
-    # Bamg options {{{
+    # BAMG options {{{
     bamg_options['Crack'] = options.getfieldvalue('Crack', 0)
-    bamg_options['anisomax'] = options.getfieldvalue('anisomax', pow(10.0, 18))
+    bamg_options['anisomax'] = options.getfieldvalue('anisomax', 1e30)
     bamg_options['coeff'] = options.getfieldvalue('coeff', 1.0)
-    bamg_options['cutoff'] = options.getfieldvalue('cutoff', pow(10.0, -5))
-    bamg_options['err'] = options.getfieldvalue('err', np.array([[0.01]]))
+    bamg_options['cutoff'] = options.getfieldvalue('cutoff', 1e-5)
+    bamg_options['err'] = options.getfieldvalue('err', 0.01)
     bamg_options['errg'] = options.getfieldvalue('errg', 0.1)
     bamg_options['field'] = options.getfieldvalue('field', np.empty((0, 1)))
     bamg_options['gradation'] = options.getfieldvalue('gradation', 1.5)
     bamg_options['Hessiantype'] = options.getfieldvalue('Hessiantype', 0)
-    bamg_options['hmin'] = options.getfieldvalue('hmin', pow(10.0, -100))
-    bamg_options['hmax'] = options.getfieldvalue('hmax', pow(10.0, 100))
+    bamg_options['hmin'] = options.getfieldvalue('hmin', 1e-100)
+    bamg_options['hmax'] = options.getfieldvalue('hmax', 1e100)
     bamg_options['hminVertices'] = options.getfieldvalue('hminVertices', np.empty((0, 1)))
     bamg_options['hmaxVertices'] = options.getfieldvalue('hmaxVertices', np.empty((0, 1)))
     bamg_options['hVertices'] = options.getfieldvalue('hVertices', np.empty((0, 1)))
     bamg_options['KeepVertices'] = options.getfieldvalue('KeepVertices', 1)
-    bamg_options['maxnbv'] = options.getfieldvalue('maxnbv', pow(10, 6))
+    bamg_options['maxnbv'] = options.getfieldvalue('maxnbv', 1e6)
     bamg_options['maxsubdiv'] = options.getfieldvalue('maxsubdiv', 10.0)
     bamg_options['metric'] = options.getfieldvalue('metric', np.empty((0, 1)))
@@ -578,5 +530,5 @@
     # }}}
 
-    # Call Bamg
+    # Call BAMG
     bamgmesh_out, bamggeom_out = BamgMesher(bamg_mesh.__dict__, bamg_geometry.__dict__, bamg_options)
 
@@ -632,5 +584,5 @@
         md.mesh.vertexonboundary[md.mesh.segments[:, 0:2] - 1] = 1
 
-    # Bamg private fields
+    # BAMG private fields
     md.private.bamg = OrderedDict()
     md.private.bamg['mesh'] = bamgmesh(bamgmesh_out)
@@ -732,5 +684,5 @@
 
     if num:
-        print(('WARNING: {} points outside the domain outline have been removed'.format(num)))
+        print('WARNING: {} points outside the domain outline have been removed'.format(num))
 
     """
Index: /issm/trunk-jpl/src/m/modules/ContourToMesh.py
===================================================================
--- /issm/trunk-jpl/src/m/modules/ContourToMesh.py	(revision 28157)
+++ /issm/trunk-jpl/src/m/modules/ContourToMesh.py	(revision 28158)
@@ -3,24 +3,20 @@
 
 def ContourToMesh(index, x, y, contourname, interptype, edgevalue):
-    """
+    """ContourToMesh - Flag the elements or nodes inside a contour
 
-    CONTOURTOMESH - Flag the elements or nodes inside a contour
+    Usage:
+        [in_nod, in_elem] = ContourToMesh(index, x, y, contourname, interptype, edgevalue)
 
-        Usage:
-            [in_nod, in_elem] = ContourToMesh(index, x, y, contourname, interptype, edgevalue)
+        index, x, y: mesh triangulation.
+        contourname: name of .exp file containing the contours.
+        interptype: string defining type of interpolation ('element', or 'node').
+        edgevalue: integer (0, 1 or 2) defining the value associated to the nodes on the edges of the polygons.
+        in_nod: vector of flags (0 or 1), of size nel if interptype is set to 'node' or 'element and node', or of size 0 otherwise.
+        in_elem: vector of flags (0 or 1), of size nel if interptype is set to 'element' or 'element and node', or of size 0 otherwise.
 
-            index, x, y: mesh triangulation.
-            contourname: name of .exp file containing the contours.
-            interptype: string defining type of interpolation ('element', or 'node').
-            edgevalue: integer (0, 1 or 2) defining the value associated to the nodes on the edges of the polygons.
-            in_nod: vector of flags (0 or 1), of size nel if interptype is set to 'node' or 'element and node',
-                or of size 0 otherwise.
-            in_elem: vector of flags (0 or 1), of size nel if interptype is set to 'element' or 'element and node',
-                or of size 0 otherwise.
-
-        Example:
-            in_nod = ContourToMesh(md.elements, md.x, md.y, 'Contour.exp', 'node', 1)
-            in_elements = ContourToMesh(md.elements, md.x, md.y, 'Contour.exp', 'element', 0)
-            [in_nodes, in_elements] = ContourToMesh(md.elements, md.x, md.y, 'Contour.exp', 'element and node', 0)
+    Example:
+        in_nod = ContourToMesh(md.elements, md.x, md.y, 'Contour.exp', 'node', 1)
+        in_elements = ContourToMesh(md.elements, md.x, md.y, 'Contour.exp', 'element', 0)
+        [in_nodes, in_elements] = ContourToMesh(md.elements, md.x, md.y, 'Contour.exp', 'element and node', 0)
     """
     #Call mex module
Index: /issm/trunk-jpl/src/m/modules/ContourToNodes.py
===================================================================
--- /issm/trunk-jpl/src/m/modules/ContourToNodes.py	(revision 28157)
+++ /issm/trunk-jpl/src/m/modules/ContourToNodes.py	(revision 28158)
@@ -3,5 +3,5 @@
 
 def ContourToNodes(x, y, contourname, edgevalue):
-    """CONTOURTONODES - flags vertices inside contour
+    """ContourToNodes - flags vertices inside contour
 
     x, y:           list of nodes
Index: /issm/trunk-jpl/src/m/solve/WriteData.py
===================================================================
--- /issm/trunk-jpl/src/m/solve/WriteData.py	(revision 28157)
+++ /issm/trunk-jpl/src/m/solve/WriteData.py	(revision 28158)
@@ -8,5 +8,5 @@
 
 def WriteData(fid, prefix, *args):
-    """WRITEDATA - write model field in binary file
+    """WriteData - write model field in binary file
 
     Usage:
@@ -14,9 +14,9 @@
     """
 
-    #process options
+    # Process options
     options = pairoptions.pairoptions(*args)
-    #Get data properties
+    # Get data properties
     if options.exist('object'):
-        #This is an object field, construct enum and data
+        # This is an object field, construct enum and data
         obj = options.getfieldvalue('object')
         fieldname = options.getfieldvalue('fieldname')
@@ -27,5 +27,5 @@
             data = getattr(obj, fieldname)
     else:
-        #No processing required
+        # No processing required
         data = options.getfieldvalue('data')
         name = options.getfieldvalue('name')
@@ -35,29 +35,29 @@
     timeserieslength = options.getfieldvalue('timeserieslength', -1)
 
-    #Process sparse matrices
+    # Process sparse matrices
     #       if issparse(data),
     #               data = full(data)
     #       end
 
-    # Make a copy of the the data so that we do not accidently overwrite any
+    # Make a copy of the data so that we do not accidentally overwrite any
     # model fields.
     #
     data = copy(data)
 
-    #Scale data if necessary
+    # Scale data if necessary
     if datatype == 'MatArray':
-        #if it is a matrix array we loop over the matrices
+        # If it is a matrix array we loop over the matrices
         for i in range(len(data)):
             if options.exist('scale'):
                 scale = options.getfieldvalue('scale')
                 if np.ndim(data[i]) > 1 and data[i].shape[0] == timeserieslength:
-                    #We scale everything but the last line that holds time
+                    # We scale everything but the last line that holds time
                     data[i][:-1, :] = scale * data[i][:-1, :]
                 else:
                     data[i] = scale * data[i]
             if np.ndim(data[i]) > 1 and data[i].shape[0] == timeserieslength:
-                #no scaling given, just get the time right
+                # No scaling given, just get the time right
                 yts = options.getfieldvalue('yts')
-                #We scale only the last line that holds time
+                # We scale only the last line that holds time
                 data[i][-1, :] = yts * data[i][-1, :]
     else:
@@ -65,5 +65,5 @@
             scale = options.getfieldvalue('scale')
             if np.ndim(data) > 1 and data.shape[0] == timeserieslength:
-                #We scale everything but the last line that holds time
+                # We scale everything but the last line that holds time
                 data[:-1, :] = scale * data[:-1, :]
             elif type(data) is list: # Deal with "TypeError: can't multiply sequence by non-int of type 'float'" for type list
@@ -72,9 +72,9 @@
                     scaleddata.append(scale * data[i])
                 data = scaleddata
-            else: #
+            else:
                 data = scale * data
         if np.ndim(data) > 1 and data.shape[0] == timeserieslength:
             yts = options.getfieldvalue('yts')
-            #We scale only the last line that holds time
+            # We scale only the last line that holds time
             # scaler = np.ones((np.shape(data)))
             # scaler[-1, :] = yts
@@ -82,16 +82,16 @@
             data[-1, :] = yts * data[-1, :]
 
-    #Step 1: write the enum to identify this record uniquely
+    # Step 1: write the enum to identify this record uniquely
     fid.write(pack('i', len(name)))
     fid.write(pack('{}s'.format(len(name)), name.encode()))
 
-    #Step 2: write the data itself.
+    # Step 2: write the data itself.
     if datatype == 'Boolean':  # {{{
-        #first write length of record
+        # First write length of record
         fid.write(pack('q', 4 + 4))  #1 bool (disguised as an int) + code
-        #write data code:
-        fid.write(pack('i', FormatToCode(datatype)))
-
-        #now write bool as an integer
+        # Write data code
+        fid.write(pack('i', FormatToCode(datatype)))
+
+        # Now write bool as an integer
         try:
             fid.write(pack('i', int(data)))  #send an int, not easy to send a bool
@@ -101,11 +101,11 @@
 
     elif datatype == 'Integer':  # {{{
-        #first write length of record
+        # First write length of record
         fid.write(pack('q', 4 + 4))  #1 integer + code
-        #write data code:
-        fid.write(pack('i', FormatToCode(datatype)))
-        #now write integer
-        try:
-            fid.write(pack('i', int(data)))  #force an int,
+        # Write data code:
+        fid.write(pack('i', FormatToCode(datatype)))
+        # Now write integer
+        try:
+            fid.write(pack('i', int(data))) # force an int
         except error as Err:
             raise ValueError('field {} cannot be marshaled, {}'.format(name, Err))
@@ -113,11 +113,11 @@
 
     elif datatype == 'Double':  # {{{
-        #first write length of record
-        fid.write(pack('q', 8 + 4))  #1 double + code
-
-    #write data code:
-        fid.write(pack('i', FormatToCode(datatype)))
-
-    #now write double
+        # First write length of record
+        fid.write(pack('q', 8 + 4)) # 1 double + code
+
+        # Write data code
+        fid.write(pack('i', FormatToCode(datatype)))
+
+        # Now write double
         try:
             fid.write(pack('d', data))
@@ -127,9 +127,9 @@
 
     elif datatype == 'String':  # {{{
-        #first write length of record
-        fid.write(pack('q', len(data) + 4 + 4))  #string + string size + code
-        #write data code:
-        fid.write(pack('i', FormatToCode(datatype)))
-        #now write string
+        # First write length of record
+        fid.write(pack('q', len(data) + 4 + 4)) # string + string size + code
+        # Write data code
+        fid.write(pack('i', FormatToCode(datatype)))
+        # Now write string
         fid.write(pack('i', len(data)))
         fid.write(pack('{}s'.format(len(data)), data.encode()))
@@ -147,19 +147,19 @@
                 data = data.reshape(0, 0)
 
-    #Get size
+        # Get size
         s = data.shape
-    #if matrix = NaN, then do not write anything
+        # If matrix = NaN, then do not write anything
         if np.ndim(data) == 2 and np.product(s) == 1 and np.all(np.isnan(data)):
             s = (0, 0)
 
-    #first write length of record
-        recordlength = 4 + 4 + 8 * np.product(s) + 4 + 4  #2 integers (32 bits) + the double matrix + code + matrix type
+        # First write length of record
+        recordlength = 4 + 4 + 8 * np.product(s) + 4 + 4 # 2 integers (32 bits) + the double matrix + code + matrix type
         fid.write(pack('q', recordlength))
 
-    #write data code and matrix type:
+        # Write data code and matrix type
         fid.write(pack('i', FormatToCode(datatype)))
         fid.write(pack('i', mattype))
 
-    #now write matrix
+        # Now write matrix
         fid.write(pack('i', s[0]))
         try:
@@ -186,12 +186,12 @@
                 data = data.reshape(0, 0)
 
-    #Get size
+        # Get size
         s = data.shape
-    #if matrix = NaN, then do not write anything
+        # If matrix = NaN, then do not write anything
         if np.ndim(data) == 1 and np.product(s) == 1 and np.all(np.isnan(data)):
             s = (0, 0)
 
-    #first write length of record
-        recordlength = 4 + 4 + 8 * np.product(s) + 4 + 4  #2 integers (32 bits) + the double matrix + code + matrix type
+        # First write length of record
+        recordlength = 4 + 4 + 8 * np.product(s) + 4 + 4 # 2 integers (32 bits) + the double matrix + code + matrix type
 
         try:
@@ -200,8 +200,8 @@
             raise ValueError('Field {} can not be marshaled, {}, with "number" the length of the record.'.format(name, Err))
 
-    #write data code and matrix type:
+        # Write data code and matrix type
         fid.write(pack('i', FormatToCode(datatype)))
         fid.write(pack('i', mattype))
-    #now write matrix
+        # Now write matrix
         fid.write(pack('i', s[0]))
         try:
@@ -211,8 +211,8 @@
         for i in range(s[0]):
             if np.ndim(data) == 1:
-                fid.write(pack('d', float(data[i])))  #get to the "c" convention, hence the transpose
+                fid.write(pack('d', float(data[i]))) # get to the "c" convention, hence the transpose
             else:
                 for j in range(s[1]):
-                    fid.write(pack('d', float(data[i][j])))  #get to the "c" convention, hence the transpose
+                    fid.write(pack('d', float(data[i][j]))) # get to the "c" convention, hence the transpose
     # }}}
 
@@ -228,5 +228,5 @@
                 data = data.reshape(0, 0)
 
-    #Get size
+        # Get size
         s = data.shape
         if np.ndim(data) == 1:
@@ -235,11 +235,11 @@
             n2 = s[1]
 
-    #if matrix = NaN, then do not write anything
+        # If matrix = NaN, then do not write anything
         if np.ndim(data) == 1 and np.product(s) == 1 and np.all(np.isnan(data)):
             s = (0, 0)
             n2 = 0
 
-    #first write length of record
-        recordlength = 4 + 4 + 8 + 8 + 1 * (s[0] - 1) * n2 + 8 * n2 + 4 + 4  #2 integers (32 bits) + the matrix + code + matrix type
+        # First write length of record
+        recordlength = 4 + 4 + 8 + 8 + 1 * (s[0] - 1) * n2 + 8 * n2 + 4 + 4 # 2 integers (32 bits) + the matrix + code + matrix type
         try:
             fid.write(pack('q', recordlength))
@@ -247,9 +247,9 @@
             raise ValueError('Field {} can not be marshaled, {}, with "number" the lenght of the record.'.format(name, Err))
 
-    #write data code and matrix type:
+        # Write data code and matrix type
         fid.write(pack('i', FormatToCode(datatype)))
         fid.write(pack('i', mattype))
 
-    #Write offset and range
+        # Write offset and range
         A = data[0:s[0] - 1]
         offsetA = A.min()
@@ -261,5 +261,5 @@
             A = (A - offsetA) / rangeA * 255.
 
-    #now write matrix
+        # Now write matrix
         fid.write(pack('i', s[0]))
         try:
@@ -272,10 +272,10 @@
             for i in range(s[0] - 1):
                 fid.write(pack('B', int(A[i])))
-            fid.write(pack('d', float(data[s[0] - 1])))  #get to the "c" convention, hence the transpose
+            fid.write(pack('d', float(data[s[0] - 1]))) # get to the "c" convention, hence the transpose
 
         elif np.product(s) > 0:
             for i in range(s[0] - 1):
                 for j in range(s[1]):
-                    fid.write(pack('B', int(A[i][j])))  #get to the "c" convention, hence the transpose
+                    fid.write(pack('B', int(A[i][j]))) # get to the "c" convention, hence the transpose
 
             for j in range(s[1]):
@@ -285,6 +285,6 @@
 
     elif datatype == 'MatArray':  # {{{
-        #first get length of record
-        recordlength = 4 + 4  #number of records + code
+        # First get length of record
+        recordlength = 4 + 4 # number of records + code
         for matrix in data:
             if isinstance(matrix, (bool, int, float)):
@@ -299,13 +299,13 @@
 
             s = matrix.shape
-            recordlength += 4 * 2 + np.product(s) * 8  #row and col of matrix + matrix of doubles
-
-    #write length of record
+            recordlength += 4 * 2 + np.product(s) * 8 # row and col of matrix + matrix of doubles
+
+        # Write length of record
         fid.write(pack('q', recordlength))
 
-    #write data code:
-        fid.write(pack('i', FormatToCode(datatype)))
-
-    #write data, first number of records
+        # Write data code
+        fid.write(pack('i', FormatToCode(datatype)))
+
+        # Write data, first number of records
         fid.write(pack('i', len(data)))
 
@@ -327,5 +327,5 @@
             for i in range(s[0]):
                 if np.ndim(matrix) == 1:
-                    fid.write(pack('d', float(matrix[i])))  #get to the "c" convention, hence the transpose
+                    fid.write(pack('d', float(matrix[i]))) # get to the "c" convention, hence the transpose
                 else:
                     for j in range(s[1]):
@@ -334,16 +334,16 @@
 
     elif datatype == 'StringArray':  # {{{
-        #first get length of record
-        recordlength = 4 + 4  #for length of array + code
+        # First get length of record
+        recordlength = 4 + 4 # for length of array + code
         for string in data:
-            recordlength += 4 + len(string)  #for each string
-
-        #write length of record
+            recordlength += 4 + len(string) # for each string
+
+        # Write length of record
         fid.write(pack('q', recordlength))
-        #write data code:
-        fid.write(pack('i', FormatToCode(datatype)))
-        #now write length of string array
+        # Write data code
+        fid.write(pack('i', FormatToCode(datatype)))
+        # Now write length of string array
         fid.write(pack('i', len(data)))
-        #now write the strings
+        # Now write the strings
         for string in data:
             fid.write(pack('i', len(string)))
@@ -357,5 +357,5 @@
 
 def FormatToCode(datatype):  # {{{
-    """ FORMATTOCODE - This routine takes the datatype string, and hardcodes it 
+    """ FormatToCode - This routine takes the datatype string, and hardcodes it 
     into an integer, which is passed along the record, in order to identify the 
     nature of the dataset being sent.
Index: /issm/trunk-jpl/test/NightlyRun/runme.m
===================================================================
--- /issm/trunk-jpl/test/NightlyRun/runme.m	(revision 28157)
+++ /issm/trunk-jpl/test/NightlyRun/runme.m	(revision 28158)
@@ -12,6 +12,6 @@
 %
 %   Available options:
-%      'id'            followed by the list of ids requested
-%      'exclude'       ids to be excluded from the test
+%      'id'            followed by the list of test IDs or names to run
+%      'exclude'       followed by the list of test IDs or names to exclude
 %      'benchmark'     'all'         : (all of them)
 %                      'nightly'     : (nightly run
@@ -61,5 +61,5 @@
 
 %Process options
-%GET benchmark {{{
+%Get benchmark {{{
 benchmark=getfieldvalue(options,'benchmark','nightly');
 if ~ismember(benchmark,{'all','nightly','ismip','eismint','thermal','mesh','validation','tranforcing','adolc','slc','qmu'})
@@ -68,5 +68,5 @@
 end
 % }}}
-%GET procedure {{{
+%Get procedure {{{
 procedure=getfieldvalue(options,'procedure','check');
 if ~ismember(procedure,{'check','update','valgrind','ncExport'})
@@ -75,5 +75,5 @@
 end
 % }}}
-%GET output {{{
+%Get output {{{
 output=getfieldvalue(options,'output','none');
 if ~ismember(output,{'nightly','none'})
@@ -82,18 +82,18 @@
 end
 % }}}
-%GET RANK and NUMPROCS for multithreaded runs  {{{
+%Get rank and numprocs for multi-threaded runs  {{{
 rank=getfieldvalue(options,'rank',1);
 numprocs=getfieldvalue(options,'numprocs',1);
 if (numprocs<rank), numprocs=1; end
 % }}}
-%GET ids  {{{
+%Get IDs (create a list of all the test files in this directory that match a certain naming scheme) {{{
 flist=dir; %use dir, as it seems to act OS independent
 list_ids=[];
 for i=1:numel(flist),
 	fname=flist(i).name;
-	if (any(fname=='.')), %Before split, check that file name contains '.'
-		ftokens=strsplit(fname,'.'); %Tokenize file name on '.'
-		if (regexp(ftokens{1},'^test[0-9]+$') &... %Basename must start with 'test' and end with a number
-			strcmp(ftokens{end},'m') ... %Extension (less '.') must be 'm'
+	if (any(fname=='.')), %before split, check that file name contains '.'
+		ftokens=strsplit(fname,'.'); %tokenize file name on '.'
+		if (regexp(ftokens{1},'^test[0-9]+$') &... %basename must start with 'test' and end with an integer
+			strcmp(ftokens{end},'m') ... %extension (less '.') must be 'm'
 		),
 			id=sscanf(ftokens{1},'test%d');
@@ -101,10 +101,10 @@
 				disp(['WARNING: ignore file ' flist(i).name]);
 			else
-				list_ids(end+1)=id; %Keep test id only (strip 'test' and '.m')
+				list_ids(end+1)=id; %keep test id only (strip 'test' and '.m')
 			end
 		end
 	end
 end
-[i1,i2]=parallelrange(rank,numprocs,length(list_ids)); %Get tests for this cpu only
+[i1,i2]=parallelrange(rank,numprocs,length(list_ids)); %get tests for this CPU only
 list_ids=list_ids(i1:i2);
 
@@ -112,5 +112,5 @@
 test_ids=intersect(test_ids,list_ids);
 % }}}
-%GET exclude {{{
+%Get excluded tests {{{
 exclude_ids=getfieldvalue(options,'exclude',[]);
 exclude_ids=[exclude_ids];
@@ -118,5 +118,5 @@
 test_ids(pos)=[];
 % }}}
-%Process Ids according to benchmarks{{{
+%Process IDs according to benchmarks{{{
 if strcmpi(benchmark,'nightly'),
 	test_ids=intersect(test_ids,[1:999]);
@@ -155,5 +155,5 @@
 		run(['test' num2str(id)]);
 
-		%UPDATE ARCHIVE?
+		%Update archive?
 		archive_name=['Archive' num2str(id) ];
 		if strcmpi(procedure,'update'),
@@ -165,5 +165,5 @@
 			disp(sprintf(['File ./../Archives/' archive_name '.arch saved\n']));
 
-		%CHECK for memory leaks?
+		%Check for memory leaks?
 		elseif strcmpi(procedure,'valgrind'),
 			fields = fieldnames(md.results);
@@ -217,9 +217,9 @@
 				end
 			end
-		%PRODUCE nc files?
+		%Produce nc files?
 		elseif strcmpi(procedure,'ncExport'),
 			export_netCDF(md, ['test' num2str(id) 'ma.nc'])
 
-		%ELSE: CHECK TEST
+		%Else: Check test
 		else,
 			for k=1:length(field_names),
@@ -231,6 +231,6 @@
 					tolerance=field_tolerances{k};
 
-					%compare to archive
-					%our output is in the correct order (n,1) or (1,1), so we do not need to transpose again
+					%Compare to archive
+					%Our output is in the correct order (n,1) or (1,1), so we do not need to transpose again
 					archive_cell=archread(['../Archives/' archive_name '.arch'],[archive_name '_field' num2str(k)]);
 					archive=archive_cell{1};
@@ -247,5 +247,5 @@
 				catch me2
 
-					%something went wrong, print failure message:
+					%Something went wrong, print failure message
 					message=getReport(me2);
 					fprintf('%s',message);
@@ -267,5 +267,5 @@
 	catch me,
 
-		%something went wrong, print failure message:
+		%Something went wrong, print failure message
 		message=getReport(me);
 		fprintf('%s',message);
Index: /issm/trunk-jpl/test/NightlyRun/test204.py
===================================================================
--- /issm/trunk-jpl/test/NightlyRun/test204.py	(revision 28157)
+++ /issm/trunk-jpl/test/NightlyRun/test204.py	(revision 28158)
@@ -10,13 +10,14 @@
 from generic import generic
 
-md = triangle(model(), '../Exp/Square.exp', 180000)
+md = triangle(model(), '../Exp/Square.exp', 180000.)
 md = setmask(md, 'all', '')
 md = parameterize(md, '../Par/SquareShelf.py')
 md.extrude(3, 2.)
 md = setflowequation(md, 'FS', 'all')
-md.cluster = generic('name', gethostname(), 'np', 3)
+md.cluster = generic('name', gethostname(), 'np', 1)
 md.stressbalance.shelf_dampening = 1
 md.timestepping.time_step = 0
-md1 = solve(md, 'Stressbalance')
+md1 = deepcopy(md)
+md1 = solve(md1, 'Stressbalance')
 md.stressbalance.shelf_dampening = 0
 md = solve(md, 'Stressbalance')
Index: /issm/trunk-jpl/test/NightlyRun/test703.py
===================================================================
--- /issm/trunk-jpl/test/NightlyRun/test703.py	(revision 28157)
+++ /issm/trunk-jpl/test/NightlyRun/test703.py	(revision 28158)
@@ -92,5 +92,5 @@
 md.cluster = generic('np', 3)
 md.stressbalance.shelf_dampening = 1
-md1 = copy.deepcopy(md)
+md1 = deepcopy(md)
 md1 = solve(md1, 'Transient')
 
Index: /issm/trunk-jpl/test/Par/SquareShelf.py
===================================================================
--- /issm/trunk-jpl/test/Par/SquareShelf.py	(revision 28157)
+++ /issm/trunk-jpl/test/Par/SquareShelf.py	(revision 28158)
@@ -74,5 +74,5 @@
 md.thermal.stabilization = 1
 md.settings.waitonlock = 30
-md.verbose = verbose()
+md.verbose = verbose(0)
 md.stressbalance.restol = 0.10
 md.steadystate.reltol = 0.02
Index: /issm/trunk-jpl/test/Par/SquareShelf2.py
===================================================================
--- /issm/trunk-jpl/test/Par/SquareShelf2.py	(revision 28157)
+++ /issm/trunk-jpl/test/Par/SquareShelf2.py	(revision 28158)
@@ -64,5 +64,5 @@
 md.thermal.stabilization = 1.
 md.settings.waitonlock = 30
-md.verbose = verbose()
+md.verbose = verbose(0)
 md.stressbalance.restol = 0.10
 md.steadystate.reltol = 0.02
