Changeset 28158
- Timestamp: 03/15/24 14:25:42 (13 months ago)
- Location: issm/trunk-jpl
- Files: 11 edited
issm/trunk-jpl/src/m/interp
- Property svn:ignore changed:
      Makefile.in
      Makefile
    + __pycache__
issm/trunk-jpl/src/m/mesh/bamg.m
r25619 → r28158

- % - maxsubdiv : maximum subdivision of exisiting elements (default is 10)
+ % - maxsubdiv : maximum subdivision of existing elements (default is 10)

- %initialize the structures required as input of Bamg
+ %initialize the structures required as input of BAMG

- % BamgGeometry parameters {{{
+ % BAMG Geometry parameters {{{

- % BamgMesh parameters {{{
+ % BAMG Mesh parameters {{{

- % BamgOptions {{{
+ % BAMG Options {{{

- bamg_options.anisomax=getfieldvalue(options,'anisomax',10.^30);
+ bamg_options.anisomax=getfieldvalue(options,'anisomax',1e30);

- bamg_options.cutoff=getfieldvalue(options,'cutoff',10.^-5);
+ bamg_options.cutoff=getfieldvalue(options,'cutoff',1e-5);

- bamg_options.hmin=getfieldvalue(options,'hmin',10.^-100);
- bamg_options.hmax=getfieldvalue(options,'hmax',10.^100);
+ bamg_options.hmin=getfieldvalue(options,'hmin',1e-100);
+ bamg_options.hmax=getfieldvalue(options,'hmax',1e100);

- bamg_options.maxnbv=getfieldvalue(options,'maxnbv',10^6);
+ bamg_options.maxnbv=getfieldvalue(options,'maxnbv',1e6);

- %call Bamg
+ %call BAMG

- %Bamg private fields
+ %BAMG private fields
issm/trunk-jpl/src/m/mesh/bamg.py
r27458 → r28158

- """BAMG - mesh generation
+ """bamg - mesh generation

Docstring options list reflowed so that each option is described on a single line instead of being wrapped over several; 'exisiting' is corrected to 'existing', and one new option is documented:

+ - vertical : is this a 2d vertical mesh (flowband, default is 0)

- # Initialize the structures required as input of Bamg
+ # Initialize the structures required as input of BAMG

- # BamgGeometry parameters {{{
+ # BAMG Geometry parameters {{{

- # do nothing...
+ # Do nothing...

- # Bamgmesh parameters {{{
+ # BAMG mesh parameters {{{

- raise TypeError("bamg error message: rifts not supported yet. Do meshprocessrift AFTER bamg")
+ raise TypeError('bamg error message: rifts not supported yet. Do meshprocessrift AFTER bamg')

- # Bamgoptions {{{
+ # BAMG options {{{

- bamg_options['anisomax'] = options.getfieldvalue('anisomax', pow(10.0, 18))
+ bamg_options['anisomax'] = options.getfieldvalue('anisomax', 1e30)

- bamg_options['cutoff'] = options.getfieldvalue('cutoff', pow(10.0, -5))
+ bamg_options['cutoff'] = options.getfieldvalue('cutoff', 1e-5)

- bamg_options['err'] = options.getfieldvalue('err', np.array([[0.01]]))
+ bamg_options['err'] = options.getfieldvalue('err', 0.01)

- bamg_options['hmin'] = options.getfieldvalue('hmin', pow(10.0, -100))
+ bamg_options['hmin'] = options.getfieldvalue('hmin', 1e-100)

- bamg_options['hmax'] = options.getfieldvalue('hmax', pow(10.0, 100))
+ bamg_options['hmax'] = options.getfieldvalue('hmax', 1e100)

- bamg_options['maxnbv'] = options.getfieldvalue('maxnbv', pow(10, 6))
+ bamg_options['maxnbv'] = options.getfieldvalue('maxnbv', 1e6)

- # Call Bamg
+ # Call BAMG

- # Bamg private fields
+ # BAMG private fields

- print(('WARNING: {} points outside the domain outline have been removed'.format(num)))
+ print('WARNING: {} points outside the domain outline have been removed'.format(num))
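For readers not familiar with this interface, here is a minimal usage sketch (not part of the changeset) showing how the options documented in the docstring above are passed to bamg() as name/value pairs. The domain file name and mesh sizes below are made up for illustration, and the imports assume ISSM's Python modules are on the path.

    from model import model   # ISSM model class (assumed importable)
    from bamg import bamg     # the function shown in the diff above

    md = model()
    md = bamg(md,
              'domain', 'DomainOutline.exp',  # hypothetical ARGUS domain file
              'hmin', 1000.,                  # minimum edge length
              'hmax', 20000.,                 # maximum edge length
              'gradation', 1.5)               # max ratio between adjacent edges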
issm/trunk-jpl/src/m/modules/ContourToMesh.py
r24213 → r28158

Docstring cleanup only:

- """
- CONTOURTOMESH - Flag the elements or nodes inside a contour
+ """ContourToMesh - Flag the elements or nodes inside a contour

The rest of the docstring (Usage, argument descriptions, Example) keeps the same content; the wrapped in_nod and in_elem descriptions are joined onto single lines and the indentation is normalized.
issm/trunk-jpl/src/m/modules/ContourToNodes.py
r25455 → r28158

- """CONTOURTONODES - flags vertices inside contour
+ """ContourToNodes - flags vertices inside contour
issm/trunk-jpl/src/m/solve/WriteData.py
r26553 → r28158

Comment and docstring cleanup throughout the file; the hunks shown touch only comments, whitespace, and docstrings, not the marshaling code itself. Representative changes:

- # process options
+ # Process options

- # Make a copy of the the data so that we do not accidently overwrite any
+ # Make a copy of the the data so that we do not accidentally overwrite any

- #write data code:
+ # Write data code

- """ FORMATTOCODE - This routine takes the datatype string, and hardcodes it
+ """ FormatToCode - This routine takes the datatype string, and hardcodes it
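Although the hunks above are cosmetic, the pack() calls they annotate define a simple binary record layout. The sketch below mirrors those calls for a 'Double' field; it is an illustration only, and the numeric format code is taken as a parameter because its real value comes from FormatToCode('Double'), which is not reproduced here.

    from struct import pack

    def write_double_record(fid, name, value, double_code):
        """Illustrative sketch of WriteData's on-disk layout for a 'Double' field."""
        # Step 1: the enum that identifies this record (name length, then name)
        fid.write(pack('i', len(name)))
        fid.write(pack('{}s'.format(len(name)), name.encode()))
        # Step 2: record length = 1 double (8 bytes) + 1 int code (4 bytes),
        # then the format code, then the value itself
        fid.write(pack('q', 8 + 4))
        fid.write(pack('i', double_code))
        fid.write(pack('d', value))

Calling write_double_record(fid, 'md.stressbalance.restol', 0.1, code) with a binary file handle would append one such record, where code stands for whatever FormatToCode('Double') returns.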
issm/trunk-jpl/test/NightlyRun/runme.m
r27262 → r28158

Help text and comment cleanup; the hunks shown touch only comments and help text, not the test-running logic. Representative changes:

- % 'id' followed by the list of ids requested
- % 'exclude' ids to be excluded from the test
+ % 'id' followed by the list of test ID's or names to run
+ % 'exclude' followed by the list of test ID's or names to exclude

- %GETbenchmark {{{
+ %Get benchmark {{{

- %GET RANK and NUMPROCS for multithreaded runs {{{
+ %Get rank and numprocs for multi-threaded runs {{{

- %GET ids {{{
+ %Get IDs (create a list of all the test files in this directory that match a certain naming scheme) {{{

- %UPDATE ARCHIVE?
+ %Update archive?
issm/trunk-jpl/test/NightlyRun/test204.py
r28148 → r28158

- md = triangle(model(), '../Exp/Square.exp', 180000)
+ md = triangle(model(), '../Exp/Square.exp', 180000.)

- md.cluster = generic('name', gethostname(), 'np', 3)
+ md.cluster = generic('name', gethostname(), 'np', 1)

- md1 = solve(md, 'Stressbalance')
+ md1 = deepcopy(md)
+ md1 = solve(md1, 'Stressbalance')
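The deepcopy added above (and the matching change in test703 below) follows a common pattern: run the first solve on a copy of the model so that md itself reaches the second solve in its original, unsolved state. A self-contained sketch of the idea, using hypothetical stand-ins for ISSM's model and solve:

    from copy import deepcopy

    class FakeModel:                      # stand-in for ISSM's model(), hypothetical
        def __init__(self):
            self.shelf_dampening = 1
            self.results = None

    def fake_solve(m):                    # stand-in for ISSM's solve(), hypothetical
        m.results = 'solved with shelf_dampening={}'.format(m.shelf_dampening)
        return m

    md = FakeModel()
    md1 = fake_solve(deepcopy(md))        # first run works on a copy; md is untouched
    md.shelf_dampening = 0
    md = fake_solve(md)                   # second run starts from the pristine model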
issm/trunk-jpl/test/NightlyRun/test703.py
r24862 → r28158

- md1 = copy.deepcopy(md)
+ md1 = deepcopy(md)
issm/trunk-jpl/test/Par/SquareShelf.py
r25461 → r28158

- md.verbose = verbose()
+ md.verbose = verbose(0)
issm/trunk-jpl/test/Par/SquareShelf2.py
r25455 → r28158

- md.verbose = verbose()
+ md.verbose = verbose(0)