Changeset 26183


Ignore:
Timestamp:
04/08/21 03:57:34 (4 years ago)
Author:
bdef
Message:

CHG: cleaning decommissioned clusters

Location:
issm/trunk-jpl/src/m
Files:
3 deleted
12 edited

Legend:

Unmodified
Added
Removed
  • issm/trunk-jpl/src/m/classes/clusters/generic.py

    r25758 r26183  
    121121
    122122                if IssmConfig('_HAVE_MPI_')[0]:
    123                     fid.write('mpiexec -np {} {} --leak-check=full {} {}/{} {} {}/{} {} 2> {}.errlog > {}.outlog '.
    124                               format(self.np, self.valgrind, supstring, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
     123                    fid.write('mpiexec -np {} {} --leak-check=full {} {}/{} {} {}/{} {} 2>{}.errlog>{}.outlog '.format(self.np, self.valgrind, supstring, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
     124                    #fid.write('mpiexec -np {} {} --tool=callgrind {}/{} {} {}/{} {} 2>{}.errlog>{}.outlog '.format(self.np, self.valgrind, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
    125125                else:
    126126                    fid.write('{} --leak-check=full {} {}/{} {} {}/{} {} 2> {}.errlog > {}.outlog '.
  • issm/trunk-jpl/src/m/classes/model.py

    r26059 r26183  
    3838from generic import generic
    3939from pfe import pfe
    40 from vilje import vilje
    41 from hexagon import hexagon
    4240from cyclone import cyclone
    43 from stallo import stallo
    4441from saga import saga
    4542from balancethickness import balancethickness
     
    185182    def __repr__(obj):  #{{{
    186183        # TODO:
    187         # - Convert all formatting to calls to <string>.format (see any 
     184        # - Convert all formatting to calls to <string>.format (see any
    188185        #   already converted <class>.__repr__ method for examples)
    189186        #
     
    893890                    md.hydrology.__dict__[field] = project2d(md, md.hydrology.__dict__[field], 1)
    894891
    895         # Materials
    896892        md.materials.rheology_B = DepthAverage(md, md.materials.rheology_B)
    897893        md.materials.rheology_n = project2d(md, md.materials.rheology_n, 1)
  • issm/trunk-jpl/src/m/coordsystems/gmtmask.m

    r25860 r26183  
    66%
    77
    8         %are we doing a recursive call? 
     8        %are we doing a recursive call?
    99        if nargin==3,
    1010                recursive=1;
    11         else 
     11        else
    1212                recursive=0;
    1313        end
    14        
     14
    1515        if(recursive)disp(sprintf('             recursing: num vertices %i',length(lat)));
    1616        else disp(sprintf('gmtmask: num vertices %i',length(lat)));
    1717        end
    18        
    19         %Check lat and long size is not more than 50,000; If so, recursively call gmtmask: 
     18
     19        %Check lat and long size is not more than 50,000; If so, recursively call gmtmask:
    2020        if length(lat)>50000,
    2121                for i=1:50000:length(lat),
     
    2828                return
    2929        end
    30        
     30
    3131        %First, write our lat,long file for gmt:
    3232        nv=length(lat);
    33         filename_all = ['all_vertices-' num2str(feature('GetPid')) '.txt']; 
    34         filename_oce = ['oce_vertices-' num2str(feature('GetPid')) '.txt']; 
     33        filename_all = ['all_vertices-' num2str(feature('GetPid')) '.txt'];
     34        filename_oce = ['oce_vertices-' num2str(feature('GetPid')) '.txt'];
    3535        dlmwrite(filename_all,[long lat (1:nv)'],'delimiter','\t','precision',10);
     36
     37        %Avoid bypassing of the ld library path by Matlab (:()
     38        %
     39        % TODO: Do we really need this (we can/already set it in etc/environment.sh)?
     40        if ismac,
     41                dyld_library_path_old=getenv('DYLD_LIBRARY_PATH');
     42                setenv('DYLD_LIBRARY_PATH',[ issmdir '/externalpackages/curl/install/lib:' issmdir '/externalpackages/hdf5/install/lib:' issmdir '/externalpackages/netcdf/install/lib' ]);
     43        end
     44
     45        %Find path to gmt, list all possible known paths to gmt (you may need to add yours to the list)
     46        gmtpaths = {[issmdir '/bin/gmt'],[issmdir '/externalpackages/gmt/install/bin/gmt'],'/Applications/GMT-5.4.3.app/Contents/Resources/bin/gmt', '/usr/bin/gmt'};
     47        gmtpath = '';
     48        for i=gmtpaths
     49                if exist(i{1},'file'),
     50                        gmtpath = i{1};
     51                        break;
     52                end
     53        end
     54        if isempty(gmtpath),
     55                error('gmt not found! Make sure it is properly installed, or add its path to this file.');
     56        end
     57
    3658
    3759        %figure out which vertices are on the ocean, which one on the continent:
     
    4163        end
    4264
     65        %reset DYLD_LIBRARY_PATH to what it was:
     66        if ismac,
     67                setenv('DYLD_LIBRARY_PATH',dyld_library_path_old);
     68        end
    4369        %read the con_vertices.txt file and flag our mesh vertices on the continent
    4470        fid=fopen(['./' filename_oce],'r');
    45         line=fgets(fid); 
     71        line=fgets(fid);
    4672        line=fgets(fid);
    4773        oce_vertices=[];
     
    5581        mask=zeros(nv,1);
    5682        mask(oce_vertices)=1;
    57        
     83
    5884        system(['rm -rf ./' filename_all ' ./' filename_oce ' ./gmt.history']);
    59         if ~recursive, disp(sprintf('gmtmask: done')); end;
     85        if ~recursive,
     86                disp(sprintf('gmtmask: done'));
     87        end
  • issm/trunk-jpl/src/m/coordsystems/gmtmask.py

    r25860 r26183  
    4444    np.savetxt('./all_vertices.txt', np.transpose([long, lat, np.arange(1, nv + 1)]), delimiter='\t', fmt='%.10f')
    4545
     46    #Avoid bypassing of the ld library path by Matlab (:()
     47    issm_dir = issmdir()
     48
     49    try:
     50        ismac
     51    except NameError:
     52        ismac = False
     53
     54    # TODO: Do we really need this (we can/already set it in etc/environment.sh)?
     55    if ismac:
     56        dyld_library_path_old = os.getenv('DYLD_LIBRARY_PATH')
     57        os.putenv('DYLD_LIBRARY_PATH', issm_dir + '/externalpackages/curl/install/lib:' + issm_dir + '/externalpackages/hdf5/install/lib:' + issm_dir + '/externalpackages/netcdf/install/lib')
     58
     59    #Find path to gmt (you may need to add yours to the list).
     60    #
     61    # NOTE: Assumes gmtselect is also in this directory.
     62    #
     63    gmtpaths = ['/usr/bin/gmt',
     64                issm_dir + '/bin/gmt',
     65                issm_dir + '/externalpackages/gmt/install/bin/gmt',
     66                '/Applications/GMT-5.4.3.app/Contents/Resources/bin/gmt']
     67    gmtpath = ''
     68    for i in range(len(gmtpaths)):
     69        if os.path.isfile(gmtpaths[i]):
     70            gmtpath = gmtpaths[i]
     71            break
     72
     73    if gmtpath == '':
     74        raise Exception('gmt not found! Make sure it is properly installed, or add its path to this file.')
     75
    4676    #figure out which vertices are on the ocean, which one on the continent:
    4777    subprocess.call('gmtselect ./ all_vertices.txt -h0 -Df -R0/360/-90/90 -A0 -JQ180/200 -Nk/s/s/k/s > ./oce_vertices.txt', shell=True)
  • issm/trunk-jpl/src/m/dev/devpath.m

    r26125 r26183  
    11% clear the last warning to focus on the warnings of the ISSM path
    2 lastwarn(''); 
     2lastwarn('');
    33
    44%Recover ISSM_DIR , or if on a Windows machine, ISSM_DIR_WIN
     
    77else
    88        ISSM_DIR=getenv('ISSM_DIR');
     9        ISSM_DEV_DIR=getenv('ISSM_DEV_DIR');
    910end
    1011
     
    1314end
    1415
    15 %Now add all issm code paths necessary to run issm smoothly. 
    16 %We capture the error output, so that we can warn the user to update 
    17 %the variable ISSM_DIR in this file, in case it is not correctly setup. 
     16%Now add all issm code paths necessary to run issm smoothly.
     17%We capture the error output, so that we can warn the user to update
     18%the variable ISSM_DIR in this file, in case it is not correctly setup.
    1819
    1920%ISSM path
    20 addpath([ISSM_DIR '/src/m/os/']); %load recursivepath
    21 addpath([ISSM_DIR '/lib']);       %load MEX files
    22 addpath(recursivepath([ISSM_DIR '/src/m']));
     21addpath([ISSM_DEV_DIR '/src/m/os/']); %load recursivepath
     22addpath([ISSM_DEV_DIR '/lib']);       %load mex
     23addpath(recursivepath([ISSM_DEV_DIR '/src/m']));
    2324addpath(recursivepath([ISSM_DIR '/externalpackages/scotch']));
    2425addpath(recursivepath([ISSM_DIR '/externalpackages/canos']));
     
    3233clear ISSM_DIR;
    3334
    34 %Check on any warning messages that might indicate that the paths were not correct. 
     35%Check on any warning messages that might indicate that the paths were not correct.
    3536if ~isempty(lastwarn),
    3637        fprintf('\n  Error trying to setup ''ISSM'' code paths. Try and update the ISSM_DIR variable in your .cshrc or .bashrc!\n');
  • issm/trunk-jpl/src/m/dev/devpath.py

    r25684 r26183  
    66#Recover ISSM_DIR and USERNAME
    77ISSM_DIR = os.getenv('ISSM_DIR')
     8ISSM_DEV_DIR = os.getenv('ISSM_DEV_DIR')
    89USERNAME = os.getenv('USER')
    910JPL_SVN = os.getenv('JPL_SVN')
     
    1213
    1314#Go through src/m and append any directory that contains a *.py file to PATH
    14 for root, dirs, files in os.walk(ISSM_DIR + '/src/m'):
     15for root, dirs, files in os.walk(ISSM_DEV_DIR + '/src/m'):
    1516    if '.svn' in dirs:
    1617        dirs.remove('.svn')
     
    2223
    2324#Also add the Nightly run directory
    24 if ISSM_DIR + '/test/NightlyRun' not in sys.path:
    25     sys.path.append(ISSM_DIR + '/test/NightlyRun')
    26 if ISSM_DIR + '/lib' not in sys.path:
    27     sys.path.append(ISSM_DIR + '/lib')
    28 if ISSM_DIR + '/src/wrappers/python/.libs' not in sys.path:
    29     sys.path.append(ISSM_DIR + '/src/wrappers/python/.libs')
     25if ISSM_DEV_DIR + '/test/NightlyRun' not in sys.path:
     26    sys.path.append(ISSM_DEV_DIR + '/test/NightlyRun')
     27if ISSM_DEV_DIR + '/lib' not in sys.path:
     28    sys.path.append(ISSM_DEV_DIR + '/lib')
     29if ISSM_DEV_DIR + '/src/wrappers/python/.libs' not in sys.path:
     30    sys.path.append(ISSM_DEV_DIR + '/src/wrappers/python/.libs')
    3031
    3132# If using clusters, we need to have the path to the cluster settings directory
     
    4849
    4950print("\n  ISSM development path correctly loaded")
    50 print("Current path is {}\n\n".format(ISSM_DIR))
     51print("Current path is {}\n\n".format(ISSM_DEV_DIR))
  • issm/trunk-jpl/src/m/mesh/planet/gmsh/gmshplanet.m

    r25860 r26183  
    1414%      md.mesh=gmshplanet('radius',6000,'resolution',100);
    1515%      md.mesh=gmshplanet('radius',6000,'resolution',100);
     16
     17        %Find path to gmsh
     18        paths = {[issmdir() 'bin/gmsh'],...
     19                 [issmdir() 'externalpackages/gmsh/install/gmsh'],...
     20                 ['/usr/bin/gmsh']...
     21                };
     22        disp(paths{1})
     23        gmshpath = '';
     24        for i=paths
     25                if exist(i{1},'file'),
     26                        gmshpath = i{1};
     27                        break;
     28                end
     29        end
     30        if isempty(gmshpath),
     31                error('gmsh not found, make sure it is properly installed');
     32        end
    1633
    1734        % Get Gmsh version
     
    121138        % Call gmsh
    122139        %
    123         % NOTE: The default format in Gmsh 3 is "msh2". Rather than conditionally 
    124         %               modifying our parsing scheme for Gmsh 4, for now, we simply set the 
     140        % NOTE: The default format in Gmsh 3 is "msh2". Rather than conditionally
     141        %               modifying our parsing scheme for Gmsh 4, for now, we simply set the
    125142        %               "-format" option.
    126143        %
  • issm/trunk-jpl/src/m/mesh/planet/gmsh/gmshplanet.py

    r25344 r26183  
    2929    subproc = subprocess.Popen(subproc_args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    3030    outs, errs = subproc.communicate()
    31     if errs != '':
     31    if errs.decode('UTF-8') != '':
    3232        raise Exception("gmshplanet: call to gmsh failed: {}".format(errs))
    3333    gmshmajorversion = int(outs)
     
    124124    # Call gmsh
    125125    #
    126     # NOTE: The default format in Gmsh 3 is "msh2". Rather than conditionally 
    127     #       modifying our parsing scheme for Gmsh 4, for now, we simply set the 
     126    # NOTE: The default format in Gmsh 3 is "msh2". Rather than conditionally
     127    #       modifying our parsing scheme for Gmsh 4, for now, we simply set the
    128128    #       "-format" option.
    129129    #
  • issm/trunk-jpl/src/m/miscellaneous/normfit_issm.py

    r24593 r26183  
    1010    #  check for any NaN in any columns
    1111    if not np.isnan(x).any():
    12 
    1312        #  explicitly calculate the moments
    1413        muhat = np.mean(x, 0)
    15     # numpy defaults to 0 delta degrees of freedom; matlab uses 1
    16         sigmahat = np.std(x, 0, ddof=1)
    17 
     14        # numpy defaults to 0 delta degrees of freedom; matlab uses 1
     15        # just check on lenght to avoid dividing by 0
     16        if len(x) <= 1:
     17            sigmahat = np.std(x, 0, ddof=0)
     18        else:
     19            sigmahat = np.std(x, 0, ddof=1)
    1820    # no way to ask this in python, assume 4 outputs
    1921    #if (nargout > 2):
     
    3941            sigmaci[0, :] = sigmahat
    4042            sigmaci[1, :] = sigmahat
     43
    4144    else:
    4245        #  must loop over columns, since number of elements could be different
     
    4851    #  remove any NaN and recursively call column
    4952        for j in range(np.shape(x, 1)):
    50             [muhat[j], sigmahat[j], muci[:, j], sigmaci[:, j]] = normfit_issm(x[not np.isnan(x[:, j]), j], alpha)
     53            #[muhat[j], sigmahat[j], muci[:, j], sigmaci[:, j]] = normfit_issm(x[not np.isnan(x[:, j]), j], alpha)
     54            muhat[j], sigmahat[j], muci[:, j], sigmaci[:, j] = normfit_issm(x[not np.isnan(x[:, j]), j], alpha)
    5155
    52     return [muhat, sigmahat, muci, sigmaci]
     56    return muhat, sigmahat, np.squeeze(muci), np.squeeze(sigmaci)
     57    #return [muhat, sigmahat, muci, sigmaci]
  • issm/trunk-jpl/src/m/partition/partitioner.py

    r25165 r26183  
    4646        #partitioning essentially happens in 2D. So partition in 2D, then
    4747        #extrude the partition vector vertically.
    48         md3d = copy.deepcopy(md) # save for later
     48        md3d = copy.deepcopy(md)  # save for later
    4949        md.mesh.elements = md.mesh.elements2d
    5050        md.mesh.x = md.mesh.x2d
     
    5858    #adjacency matrix if needed:
    5959    if recomputeadjacency == 'on':
     60        print("adj calll")
    6061        md = adjacency(md)
    6162    else:
     
    6768        else:
    6869            # default method (from chaco.m)
    69             method = np.array([1, 1, 0, 0, 1, 1, 50, 0, 0.001, 7654321]).conj().transpose()
     70            method = np.array([1, 1, 0, 0, 1, 1, 50, 0, 0.001, 7654321])  #.conj().transpose()
    7071            method[0] = 3  #  global method (3 = inertial (geometric))
    7172            method[2] = 0  #  vertex weights (0 = off, 1 = on)
     
    8485
    8586            #  partition into nparts
     87            print("ChacoCall")
    8688            if isinstance(md.mesh, mesh2d):
    87                 part = Chaco(md.qmu.adjacency, weights, np.array([]), md.mesh.x, md.mesh.y, np.zeros((md.mesh.numberofvertices, 1)), method, npart, np.array([]))[0].conj().transpose() + 1 #index partitions from 1 up. like metis.
     89                print("caseA")
     90                part = Chaco(md.qmu.adjacency, weights, np.array([]), md.mesh.x, md.mesh.y, np.zeros((md.mesh.numberofvertices)), method, npart, np.array([]))[0].conj().transpose() + 1   #index partitions from 1 up. like metis.
    8891            else:
    89                 part = Chaco(md.qmu.adjacency, weights, np.array([]), md.mesh.x, md.mesh.y, md.mesh.z, method, npart, np.array([]))[0].conj().transpose() + 1 #index partitions from 1 up. like metis.
     92                print("caseB")
     93                part = Chaco(md.qmu.adjacency, weights, np.array([]), md.mesh.x, md.mesh.y, md.mesh.z, method, npart, np.array([]))[0].conj().transpose() + 1  #index partitions from 1 up. like metis.
     94            print("Done")
     95
    9096    elif package == 'scotch':
    9197        if vectortype == 'element':
     
    9399        else:
    94100            #are we using weights?
    95             if m.strcmpi(options.getfieldvalue('weighting'), 'on'):
     101            if options.getfieldvalue('weighting') == 'on':
    96102                weights = np.floor(md.qmu.vertex_weight / min(md.qmu.vertex_weight))
    97103            else:
     
    99105            maptab = Scotch(md.qmu.adjacency, [], weights, [], 'cmplt', [npart])
    100106
    101             part = maptab[:, 1] + 1 #index partitions from 1 up. like metis.
     107            part = maptab[:, 1] + 1  #index partitions from 1 up. like metis.
    102108
    103109    elif package == 'linear':
     
    116122    else:
    117123        raise RuntimeError('partitioner error message: could not find {} partitioner'.format(package))
    118 
     124    print("extrude")
    119125    #extrude if we are in 3D:
    120126    if md.mesh.dimension() == 3:
  • issm/trunk-jpl/src/m/qmu/dakota_out_parse.py

    r25726 r26183  
    88from helpers import *
    99
    10 # NOTE: May be rewritten later to take advantage of Python's file I/O
    11 # mechanics. As it is written now, it is often difficult to work with, but is
    12 # analagous to the MATLAB version of dakota_out_parse.
     10#  NOTE: May be rewritten later to take advantage of Python's file I/O
     11#  mechanics. As it is written now, it is often difficult to work with, but is
     12#  analagous to the MATLAB version of dakota_out_parse.
    1313
    1414
     
    3232        prcm          (double array, partial rank correlation matrix)
    3333
    34     The filei will be prompted for if empty. The fields of dresp are particular 
    35     to the data contained within the file. The scm, pcm, srcm, and prcm are 
     34    The filei will be prompted for if empty. The fields of dresp are particular
     35    to the data contained within the file. The scm, pcm, srcm, and prcm are
    3636    output by Dakota only for the sampling methods.
    3737
    38     This function reads a Dakota .out output file and parses it into the Python 
    39     runtime. It operates in a content-driven fashion, where it skips the 
    40     intermediate data and then parses whatever output data it encounters in the 
    41     order in which it exists in the file, rather than searching for data based 
    42     on the particular method (this makes it independent of method). It also can 
     38    This function reads a Dakota .out output file and parses it into the Python
     39    runtime. It operates in a content-driven fashion, where it skips the
     40    intermediate data and then parses whatever output data it encounters in the
     41    order in which it exists in the file, rather than searching for data based
     42    on the particular method (this makes it independent of method). It also can
    4343    read and parse the .dat tabular_output file.
    4444
    45     This data would typically be used for plotting and other postprocessing 
     45    This data would typically be used for plotting and other postprocessing
    4646    within MATLAB or Excel.
    4747
    4848    TODO:
    49     - Figure out why output from Dakota is different under MATLAB and Python 
     49    - Figure out why output from Dakota is different under MATLAB and Python
    5050    (is it the input file that we write?)
    5151
    52     "Copyright 2009, by the California Institute of Technology. ALL RIGHTS 
    53     RESERVED. United States Government Sponsorship acknowledged. Any commercial 
    54     use must be negotiated with the Office of Technology Transfer at the 
     52    "Copyright 2009, by the California Institute of Technology. ALL RIGHTS
     53    RESERVED. United States Government Sponsorship acknowledged. Any commercial
     54    use must be negotiated with the Office of Technology Transfer at the
    5555    California Institute of Technology. (NTR 47078)
    5656
    57     This software may be subject to U.S. export control laws. By accepting this 
    58     software, the user agrees to comply with all applicable U.S. export laws 
    59     and regulations. User has the responsibility to obtain export licenses, or 
    60     other export authority as may be required before exporting such information 
     57    This software may be subject to U.S. export control laws. By accepting this
     58    software, the user agrees to comply with all applicable U.S. export laws
     59    and regulations. User has the responsibility to obtain export licenses, or
     60    other export authority as may be required before exporting such information
    6161    to foreign countries or providing access to foreign persons."
    6262    """
     
    7878            raise RuntimeError('File ' + filei + ' is empty')
    7979
    80         dresp = [] # of struct()
     80        dresp = []  # of struct()
    8181        scm = struct()
    8282        pcm = struct()
     
    189189    [ntokens, tokens] = fltokens(fline)
    190190
    191     if strncmpi(fline, '%eval_id interface', 18): # Dakota versions >= 6
     191    if fline.startswith('%eval_id interface'): # Dakota versions >= 6
    192192        offset = 2
    193193    else:  # Dakota versions < 6
     
    223223    print('Number of rows (Dakota func evals) = ' + str(nrow))
    224224
    225     # Calculate statistics
     225    # Calculate statistic
    226226    if (np.size(data, 0) > 1):
    227227        #dmean = mean(data)
     
    229229        [dmean, dstddev, dmeanci, dstddevci] = normfit_issm(data, 0.05)
    230230    else:
    231         dmean = np.zeros((1, np.size(data, 1)))
    232         dstddev = np.zeros((1, np.size(data, 1)))
     231        dmean = np.zeros((np.size(data, 1)))
     232        dstddev = np.zeros((np.size(data, 1)))
    233233        dmeanci = np.zeros((2, np.size(data, 1)))
    234234        dstddevci = np.zeros((2, np.size(data, 1)))
    235235        for i in range(np.size(data, 1)):
    236             [dmean[0, i], dstddev[0, i], dmeanci[:, i], dstddevci[:, i]] = normfit_issm(data[:, i], 0.05)
     236            dmean[i], dstddev[i], dmeanci[:, i], dstddevci[:, i] = normfit_issm(data[:, i], 0.05)
    237237
    238238    dmin = data.min(axis=0)
     
    244244    dmax95 = prctile_issm(data, 95, 0)
    245245
    246     # NOTE: The following line may cause the following warning (should not 
    247     # crash or invalidate results) when one of the inputs does not change with 
     246    # NOTE: The following line may cause the following warning (should not
     247    # crash or invalidate results) when one of the inputs does not change with
    248248    # respect to the other(s), causing an internal divide-by-zero error,
    249249    #
     
    487487            fline = fidi.readline()
    488488            icdf = 0
    489             while (fline != '' and not fline.isspace()) and not strncmpi(fline, 'Cumulative Distribution Function', 32):
     489            while (fline != '' and not fline.isspace()) and not fline.startswith('Cumulative Distribution Function'):
    490490                [ntokens, tokens] = fltokens(fline)
    491491                icdf = icdf + 1
     
    546546            fline = fidi.readline()
    547547            ipdf = 0
    548             while (fline != '' and not fline.isspace()) and not strncmpi(fline, 'PDF for', 7):
     548            while (fline != '' and not fline.isspace()) and not fline.startswith('PDF for'):
    549549                [ntokens, tokens] = fltokens(fline)
    550550                ipdf = ipdf + 1
     
    624624    ndresp = 0
    625625
    626     while (fline != '' and not fline.isspace()) and strncmpi(fline, 'MV Statistics for ', 18):
     626    while fline.startswith('MV Statistics for '):
    627627
    628628        #  add new response function and moments
     
    649649        dresp[-1].sens = []
    650650
    651         while (fline != '' and not fline.isspace()) and strncmpi(fline, '  Importance Factor for variable ', 33):
     651        while 'Importance Factor for variable' in fline:
    652652            [ntokens, tokens] = fltokens(fline)
    653653            idvar = idvar + 1
     
    667667            dresp[-1].impfac = []
    668668            dresp[-1].sens = []
    669             while type(fline) == str and (fline != '' and not fline.isspace()) and not strncmpi(fline, 'Cumulative Distribution Function', 32) and not strncmpi(fline, 'MV Statistics for ', 18) and not strncmp(fline, ' - ', 1):
     669            while type(fline) == str and (fline != '' and not fline.isspace()) and 'Cumulative Distribution Function' not in fline and 'MV Statistics for ' not in fline and '---' not in fline:
    670670                fline = fidi.readline()
    671671
     
    682682                    fline = fidi.readline()
    683683
    684         while (fline != '' and not fline.isspace()) and strncmpi(fline, 'Cumulative Distribution Function', 32):
     684        while fline.startswith('Cumulative Distribution Function'):
    685685            [ntokens, tokens] = fltokens(fline)
    686686
     
    705705            #  read and add cdf table to response function
    706706            fline = fidi.readline()
    707             while (fline != '' and not fline.isspace()) and not strncmpi(fline, 'MV Statistics for ', 18) and not strncmp(fline, ' - ', 1):
     707            while (fline != '' and not fline.isspace()) and not fline.startswith('MV Statistics for ') and '---' not in fline:
    708708                [ntokens, tokens] = fltokens(fline)
    709709                icdf = icdf + 1
     
    724724            print('    Cumulative Distribution Function not available')
    725725            dresp[ndresp].cdf = []
    726             while (fline != '' and not fline.isspace()) and not strncmpi(fline, 'MV Statistics for ', 18) and not strncmp(fline, ' - ', 1):
     726            while (fline != '' and not fline.isspace()) and not fline.startswith('MV Statistics for ') and '---' not in fline:
    727727                fline = fidi.readline()
    728728
     
    737737
    738738    if fline is None or fline == '' or fline.isspace():
    739         fline = findline(fidi, ' < < < < < Best ')
     739        fline = findline(fidi, '<<<<< Best ')
    740740        if fline is None:
    741741            return
     
    747747    print('Reading values for best function evaluation:')
    748748
    749     while (fline != '' and not fline.isspace()) and strncmpi(fline, ' < < < < < Best ', 11):
     749    while fline.startswith('<<<<< Best '):
    750750        [ntokens, tokens] = fltokens(fline)
    751751
    752752    #  read and add best parameter(s)
    753753
    754         if strncmpi(str(tokens[2]), 'parameter', 9):
     754        if str(tokens[2]).startswith('parameter'):
    755755            print('  ' + fline)
    756756
     
    759759            dresp.best.descriptor = ''
    760760
    761             while (fline != '' and not fline.isspace()) and not strncmpi(fline, ' < < < < < Best ', 11):
     761            while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
    762762                [ntokens, tokens] = fltokens(fline)
    763763                dresp.best.param.append([0])
     
    767767
    768768        #  read and add best objective function(s)
    769         elif strncmpi(str(tokens[2]), 'objective', 9) and strncmpi(str(tokens[3]), 'function', 8):
     769        elif str(tokens[2]).startswith('objective') and str(tokens[3]).startswith('function'):
    770770            print('  ' + fline)
    771771
     
    773773            dresp.best.of = []
    774774
    775             while (fline != '' and not fline.isspace()) and not strncmpi(fline, ' < < < < < Best ', 11):
     775            while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
    776776                [ntokens, tokens] = fltokens(fline)
    777777                dresp.best.of.append(0)
     
    780780
    781781        #  read and add best residual norms
    782         elif strncmpi(str(tokens[2]), 'residual', 8) and strncmpi(str(tokens[3]), 'norm', 4):
     782        elif str(tokens[2]).startswith('residual') and str(tokens[3]).startswith('norm'):
    783783            print('  ' + fline)
    784784            dresp.best.norm = tokens[5]
     
    787787            fline = fidi.readline()
    788788
    789             while (fline != '' and not fline.isspace()) and not strncmpi(fline, ' < < < < < Best ', 11):
     789            while (fline != '' and not fline.isspace()) and not strncmpi(fline, '<<<<< Best ', 11):
    790790                fline = fidi.readline()
    791791
    792792        #  read and add best residual term(s)
    793         elif strncmpi(str(tokens[2]), 'residual', 8) and strncmpi(str(tokens[3]), 'term', 4):
     793        elif str(tokens[2]).startswith('residual') and str(tokens[3]).startswith('term'):
    794794            print('  ' + fline)
    795795
     
    797797            dresp.best.res = []
    798798
    799             while (fline != '' and not fline.isspace()) and not strncmpi(fline, '<<<<<Best ', 11):
     799            while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
    800800                [ntokens, tokens] = fltokens(fline)
    801801                dresp.best.res.append(0)
     
    804804
    805805        #  read and add best constraint value(s)
    806         elif strncmpi(str(tokens[2]), 'constraint', 10) and strncmpi(str(tokens[3]), 'value', 5):
     806        elif str(tokens[2]).startswith('constraint') and str(tokens[3]).startswith('value'):
    807807            print('  ' + fline)
    808808
     
    810810            dresp.best.nc = []
    811811
    812             while (fline != '' and not fline.isspace()) and not strncmpi(fline, '<<<<<Best ', 11):
     812            while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
    813813                [ntokens, tokens] = fltokens(fline)
    814814                dresp.best.nc.append(0)
     
    817817
    818818        #  read and add best data captured
    819         elif strncmpi(str(tokens[2]), 'data', 4) and strncmpi(str(tokens[3]), 'captured', 8):
     819        elif str(tokens[2]).startswith('data') and str(tokens[3]).startswith('captured'):
    820820            print('  ' + fline)
    821821            dresp.best.eval = tokens[7]
     
    823823            fline = fidi.readline()
    824824
    825             while (fline != '' and not fline.isspace()) and not strncmpi(fline, ' < < < < < Best ', 11):
     825            while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
    826826                fline = fidi.readline()
    827827
     
    832832            fline = fidi.readline()
    833833
    834             while (fline != '' and not fline.isspace()) and not strncmpi(fline, ' < < < < < Best ', 11):
     834            while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
    835835                fline = fidi.readline()
    836836
     
    903903    for fline in fidi:
    904904        npos += len(fline)
    905         if (strncmpi(fline, string, len(string))):
     905        if fline.startswith(string):
    906906            if goto_line:
    907907                fidi.seek(npos, 0)
  • issm/trunk-jpl/src/m/solve/parseresultsfromdisk.py

    r25817 r26183  
    33import numpy as np
    44from results import solution
     5from netCDF4 import Dataset
    56
    67
     
    7576# }}}
    7677
     78
    7779def parseresultsfromdiskioserial(md, filename):  # {{{
    7880    # Open file
    7981    try:
    8082        fid = open(filename, 'rb')
    81     except IOError as e:
     83    except IOError:
    8284        raise IOError("parseresultsfromdisk error message: could not open {} for binary reading".format(filename))
    83 
     85    print(md.results)
    8486    # Collect all results in a list
    8587    allresults = []
     
    8789        # Read next result
    8890        result = ReadData(fid, md)
    89 
    9091        if result is None:
    9192            if allresults == []:
     
    9394            else:
    9495                break
     96        # print("now got {} for time {}".format(result['fieldname'], result['time']))
     97        # print("with keys {}".format(result.keys()))
     98        # print("==============================")
    9599
    96100        allresults.append(result)
     
    119123    return results
    120124# }}}
     125
     126
     127def parseresultsfromdisktonc(md, filin, filout):  # {{{
     128    verbose = 0
     129    # Open file
     130    try:
     131        fid = open(filin, 'rb')
     132    except IOError:
     133        raise IOError("parseresultsfromdisk error message: could not open {} for binary reading".format(filin))
     134
     135    NCData = Dataset(filout, 'w', format='NETCDF4')
     136    UnlimDim = NCData.createDimension('Unlim', None)
     137    DimDict = {len(UnlimDim): 'Inf'}
     138    dimindex = 2
     139    dimlist = [2, md.mesh.numberofelements, md.mesh.numberofvertices, np.shape(md.mesh.elements)[1]]
     140    dimnames = ['DictDummy', 'EltNum', 'VertNum', 'VertPerElt']
     141    if verbose > 0:
     142        print('===Creating dimensions ===')
     143    for i, newdim in enumerate(dimlist):
     144        if newdim not in list(DimDict.keys()):
     145            dimindex += 1
     146            NewDim = NCData.createDimension(dimnames[i], newdim)
     147            DimDict[len(NewDim)] = dimnames[i]
     148
     149    typelist = [bool, str, int, float, complex,
     150                collections.OrderedDict,
     151                np.int64, np.ndarray, np.float64]
     152
     153    # Collect all results in a list
     154    allresults = []
     155    while True:
     156        # Read next result
     157        result = ReadData(fid, md)
     158        if result is None:
     159            if allresults == []:
     160                raise Exception('no results found in binary file ' + filename)
     161            else:
     162                break
     163        print("now got {} for time {}".format(result['fieldname'], result['time']))
     164        print("with keys {}".format(result.keys()))
     165        print("==============================")
     166
     167        allresults.append(result)
     168    fid.close()
     169
     170    # Now, process all results and find out how many steps we have
     171    numresults = len(allresults)
     172    allsteps = np.zeros((numresults, 1))
     173    for i in range(numresults):
     174        allsteps[i] = allresults[i]['step']
     175    pos = np.where(allsteps != -9999)
     176    allsteps = np.sort(np.unique(allsteps[pos]))
     177
     178    # Ok, now construct structure
     179    results = solution()
     180
     181    for i in range(numresults):
     182        result = allresults[i]
     183        index = 0
     184        if result['step'] != -9999:
     185            index = np.where(result['step'] == allsteps)[0][0]
     186            setattr(results[index], 'step', result['step'])
     187        if result['time'] != -9999:
     188            setattr(results[index], 'time', result['time'])
     189        setattr(results[index], result['fieldname'], result['field'])
     190    return results
     191# }}}
     192
    121193
    122194def ReadData(fid, md):  # {{{
     
    321393# }}}
    322394
     395
    323396def addfieldtorecord(a, descr): #{{{
    324397    if a.dtype.fields is None:
Note: See TracChangeset for help on using the changeset viewer.