Changeset 26184


Ignore:
Timestamp:
04/08/21 04:47:08 (4 years ago)
Author:
bdef
Message:

BUG: reverting unwanted changes

Location:
issm/trunk-jpl/src/m
Files:
13 edited
3 copied

Legend:

Unmodified
Added
Removed
  • issm/trunk-jpl/src/m/classes/clusters/generic.py

    r26183 r26184  
    121121
    122122                if IssmConfig('_HAVE_MPI_')[0]:
    123                     fid.write('mpiexec -np {} {} --leak-check=full {} {}/{} {} {}/{} {} 2>{}.errlog>{}.outlog '.format(self.np, self.valgrind, supstring, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
    124                     #fid.write('mpiexec -np {} {} --tool=callgrind {}/{} {} {}/{} {} 2>{}.errlog>{}.outlog '.format(self.np, self.valgrind, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
     123                    fid.write('mpiexec -np {} {} --leak-check=full {} {}/{} {} {}/{} {} 2> {}.errlog > {}.outlog '.
     124                              format(self.np, self.valgrind, supstring, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
    125125                else:
    126126                    fid.write('{} --leak-check=full {} {}/{} {} {}/{} {} 2> {}.errlog > {}.outlog '.
  • issm/trunk-jpl/src/m/classes/model.py

    r26183 r26184  
    3838from generic import generic
    3939from pfe import pfe
     40from vilje import vilje
     41from hexagon import hexagon
    4042from cyclone import cyclone
     43from stallo import stallo
    4144from saga import saga
    4245from balancethickness import balancethickness
     
    182185    def __repr__(obj):  #{{{
    183186        # TODO:
    184         # - Convert all formatting to calls to <string>.format (see any
     187        # - Convert all formatting to calls to <string>.format (see any 
    185188        #   already converted <class>.__repr__ method for examples)
    186189        #
     
    890893                    md.hydrology.__dict__[field] = project2d(md, md.hydrology.__dict__[field], 1)
    891894
     895        # Materials
    892896        md.materials.rheology_B = DepthAverage(md, md.materials.rheology_B)
    893897        md.materials.rheology_n = project2d(md, md.materials.rheology_n, 1)
  • issm/trunk-jpl/src/m/coordsystems/gmtmask.m

    r26183 r26184  
    66%
    77
    8         %are we doing a recursive call?
     8        %are we doing a recursive call? 
    99        if nargin==3,
    1010                recursive=1;
    11         else
     11        else 
    1212                recursive=0;
    1313        end
    14 
     14       
    1515        if(recursive)disp(sprintf('             recursing: num vertices %i',length(lat)));
    1616        else disp(sprintf('gmtmask: num vertices %i',length(lat)));
    1717        end
    18 
    19         %Check lat and long size is not more than 50,000; If so, recursively call gmtmask:
     18       
     19        %Check lat and long size is not more than 50,000; If so, recursively call gmtmask: 
    2020        if length(lat)>50000,
    2121                for i=1:50000:length(lat),
     
    2828                return
    2929        end
    30 
     30       
    3131        %First, write our lat,long file for gmt:
    3232        nv=length(lat);
    33         filename_all = ['all_vertices-' num2str(feature('GetPid')) '.txt'];
    34         filename_oce = ['oce_vertices-' num2str(feature('GetPid')) '.txt'];
     33        filename_all = ['all_vertices-' num2str(feature('GetPid')) '.txt']; 
     34        filename_oce = ['oce_vertices-' num2str(feature('GetPid')) '.txt']; 
    3535        dlmwrite(filename_all,[long lat (1:nv)'],'delimiter','\t','precision',10);
    36 
    37         %Avoid bypassing of the ld library path by Matlab (:()
    38         %
    39         % TODO: Do we really need this (we can/already set it in etc/environment.sh)?
    40         if ismac,
    41                 dyld_library_path_old=getenv('DYLD_LIBRARY_PATH');
    42                 setenv('DYLD_LIBRARY_PATH',[ issmdir '/externalpackages/curl/install/lib:' issmdir '/externalpackages/hdf5/install/lib:' issmdir '/externalpackages/netcdf/install/lib' ]);
    43         end
    44 
    45         %Find path to gmt, list all possible known paths to gmt (you may need to add yours to the list)
    46         gmtpaths = {[issmdir '/bin/gmt'],[issmdir '/externalpackages/gmt/install/bin/gmt'],'/Applications/GMT-5.4.3.app/Contents/Resources/bin/gmt', '/usr/bin/gmt'};
    47         gmtpath = '';
    48         for i=gmtpaths
    49                 if exist(i{1},'file'),
    50                         gmtpath = i{1};
    51                         break;
    52                 end
    53         end
    54         if isempty(gmtpath),
    55                 error('gmt not found! Make sure it is properly installed, or add its path to this file.');
    56         end
    57 
    5836
    5937        %figure out which vertices are on the ocean, which one on the continent:
     
    6341        end
    6442
    65         %reset DYLD_LIBRARY_PATH to what it was:
    66         if ismac,
    67                 setenv('DYLD_LIBRARY_PATH',dyld_library_path_old);
    68         end
    6943        %read the con_vertices.txt file and flag our mesh vertices on the continent
    7044        fid=fopen(['./' filename_oce],'r');
    71         line=fgets(fid);
     45        line=fgets(fid); 
    7246        line=fgets(fid);
    7347        oce_vertices=[];
     
    8155        mask=zeros(nv,1);
    8256        mask(oce_vertices)=1;
    83 
     57       
    8458        system(['rm -rf ./' filename_all ' ./' filename_oce ' ./gmt.history']);
    85         if ~recursive,
    86                 disp(sprintf('gmtmask: done'));
    87         end
     59        if ~recursive, disp(sprintf('gmtmask: done')); end;
  • issm/trunk-jpl/src/m/coordsystems/gmtmask.py

    r26183 r26184  
    4444    np.savetxt('./all_vertices.txt', np.transpose([long, lat, np.arange(1, nv + 1)]), delimiter='\t', fmt='%.10f')
    4545
    46     #Avoid bypassing of the ld library path by Matlab (:()
    47     issm_dir = issmdir()
    48 
    49     try:
    50         ismac
    51     except NameError:
    52         ismac = False
    53 
    54     # TODO: Do we really need this (we can/already set it in etc/environment.sh)?
    55     if ismac:
    56         dyld_library_path_old = os.getenv('DYLD_LIBRARY_PATH')
    57         os.putenv('DYLD_LIBRARY_PATH', issm_dir + '/externalpackages/curl/install/lib:' + issm_dir + '/externalpackages/hdf5/install/lib:' + issm_dir + '/externalpackages/netcdf/install/lib')
    58 
    59     #Find path to gmt (you may need to add yours to the list).
    60     #
    61     # NOTE: Assumes gmtselect is also in this directory.
    62     #
    63     gmtpaths = ['/usr/bin/gmt',
    64                 issm_dir + '/bin/gmt',
    65                 issm_dir + '/externalpackages/gmt/install/bin/gmt',
    66                 '/Applications/GMT-5.4.3.app/Contents/Resources/bin/gmt']
    67     gmtpath = ''
    68     for i in range(len(gmtpaths)):
    69         if os.path.isfile(gmtpaths[i]):
    70             gmtpath = gmtpaths[i]
    71             break
    72 
    73     if gmtpath == '':
    74         raise Exception('gmt not found! Make sure it is properly installed, or add its path to this file.')
    75 
    7646    #figure out which vertices are on the ocean, which one on the continent:
    7747    subprocess.call('gmtselect ./ all_vertices.txt -h0 -Df -R0/360/-90/90 -A0 -JQ180/200 -Nk/s/s/k/s > ./oce_vertices.txt', shell=True)
  • issm/trunk-jpl/src/m/dev/devpath.m

    r26183 r26184  
    11% clear the last warning to focus on the warnings of the ISSM path
    2 lastwarn('');
     2lastwarn(''); 
    33
    44%Recover ISSM_DIR , or if on a Windows machine, ISSM_DIR_WIN
     
    77else
    88        ISSM_DIR=getenv('ISSM_DIR');
    9         ISSM_DEV_DIR=getenv('ISSM_DEV_DIR');
    109end
    1110
     
    1413end
    1514
    16 %Now add all issm code paths necessary to run issm smoothly.
    17 %We capture the error output, so that we can warn the user to update
    18 %the variable ISSM_DIR in this file, in case it is not correctly setup.
     15%Now add all issm code paths necessary to run issm smoothly. 
     16%We capture the error output, so that we can warn the user to update 
     17%the variable ISSM_DIR in this file, in case it is not correctly setup. 
    1918
    2019%ISSM path
    21 addpath([ISSM_DEV_DIR '/src/m/os/']); %load recursivepath
    22 addpath([ISSM_DEV_DIR '/lib']);       %load mex
    23 addpath(recursivepath([ISSM_DEV_DIR '/src/m']));
     20addpath([ISSM_DIR '/src/m/os/']); %load recursivepath
     21addpath([ISSM_DIR '/lib']);       %load MEX files
     22addpath(recursivepath([ISSM_DIR '/src/m']));
    2423addpath(recursivepath([ISSM_DIR '/externalpackages/scotch']));
    2524addpath(recursivepath([ISSM_DIR '/externalpackages/canos']));
     
    3332clear ISSM_DIR;
    3433
    35 %Check on any warning messages that might indicate that the paths were not correct.
     34%Check on any warning messages that might indicate that the paths were not correct. 
    3635if ~isempty(lastwarn),
    3736        fprintf('\n  Error trying to setup ''ISSM'' code paths. Try and update the ISSM_DIR variable in your .cshrc or .bashrc!\n');
  • issm/trunk-jpl/src/m/dev/devpath.py

    r26183 r26184  
    66#Recover ISSM_DIR and USERNAME
    77ISSM_DIR = os.getenv('ISSM_DIR')
    8 ISSM_DEV_DIR = os.getenv('ISSM_DEV_DIR')
    98USERNAME = os.getenv('USER')
    109JPL_SVN = os.getenv('JPL_SVN')
     
    1312
    1413#Go through src/m and append any directory that contains a *.py file to PATH
    15 for root, dirs, files in os.walk(ISSM_DEV_DIR + '/src/m'):
     14for root, dirs, files in os.walk(ISSM_DIR + '/src/m'):
    1615    if '.svn' in dirs:
    1716        dirs.remove('.svn')
     
    2322
    2423#Also add the Nightly run directory
    25 if ISSM_DEV_DIR + '/test/NightlyRun' not in sys.path:
    26     sys.path.append(ISSM_DEV_DIR + '/test/NightlyRun')
    27 if ISSM_DEV_DIR + '/lib' not in sys.path:
    28     sys.path.append(ISSM_DEV_DIR + '/lib')
    29 if ISSM_DEV_DIR + '/src/wrappers/python/.libs' not in sys.path:
    30     sys.path.append(ISSM_DEV_DIR + '/src/wrappers/python/.libs')
     24if ISSM_DIR + '/test/NightlyRun' not in sys.path:
     25    sys.path.append(ISSM_DIR + '/test/NightlyRun')
     26if ISSM_DIR + '/lib' not in sys.path:
     27    sys.path.append(ISSM_DIR + '/lib')
     28if ISSM_DIR + '/src/wrappers/python/.libs' not in sys.path:
     29    sys.path.append(ISSM_DIR + '/src/wrappers/python/.libs')
    3130
    3231# If using clusters, we need to have the path to the cluster settings directory
     
    4948
    5049print("\n  ISSM development path correctly loaded")
    51 print("Current path is {}\n\n".format(ISSM_DEV_DIR))
     50print("Current path is {}\n\n".format(ISSM_DIR))
  • issm/trunk-jpl/src/m/mesh/planet/gmsh/gmshplanet.m

    r26183 r26184  
    1414%      md.mesh=gmshplanet('radius',6000,'resolution',100);
    1515%      md.mesh=gmshplanet('radius',6000,'resolution',100);
    16 
    17         %Find path to gmsh
    18         paths = {[issmdir() 'bin/gmsh'],...
    19                  [issmdir() 'externalpackages/gmsh/install/gmsh'],...
    20                  ['/usr/bin/gmsh']...
    21                 };
    22         disp(paths{1})
    23         gmshpath = '';
    24         for i=paths
    25                 if exist(i{1},'file'),
    26                         gmshpath = i{1};
    27                         break;
    28                 end
    29         end
    30         if isempty(gmshpath),
    31                 error('gmsh not found, make sure it is properly installed');
    32         end
    3316
    3417        % Get Gmsh version
     
    138121        % Call gmsh
    139122        %
    140         % NOTE: The default format in Gmsh 3 is "msh2". Rather than conditionally
    141         %               modifying our parsing scheme for Gmsh 4, for now, we simply set the
     123        % NOTE: The default format in Gmsh 3 is "msh2". Rather than conditionally 
     124        %               modifying our parsing scheme for Gmsh 4, for now, we simply set the 
    142125        %               "-format" option.
    143126        %
  • issm/trunk-jpl/src/m/mesh/planet/gmsh/gmshplanet.py

    r26183 r26184  
    2929    subproc = subprocess.Popen(subproc_args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    3030    outs, errs = subproc.communicate()
    31     if errs.decode('UTF-8') != '':
     31    if errs != '':
    3232        raise Exception("gmshplanet: call to gmsh failed: {}".format(errs))
    3333    gmshmajorversion = int(outs)
     
    124124    # Call gmsh
    125125    #
    126     # NOTE: The default format in Gmsh 3 is "msh2". Rather than conditionally
    127     #       modifying our parsing scheme for Gmsh 4, for now, we simply set the
     126    # NOTE: The default format in Gmsh 3 is "msh2". Rather than conditionally 
     127    #       modifying our parsing scheme for Gmsh 4, for now, we simply set the 
    128128    #       "-format" option.
    129129    #
  • issm/trunk-jpl/src/m/miscellaneous/normfit_issm.py

    r26183 r26184  
    1010    #  check for any NaN in any columns
    1111    if not np.isnan(x).any():
     12
    1213        #  explicitly calculate the moments
    1314        muhat = np.mean(x, 0)
    14         # numpy defaults to 0 delta degrees of freedom; matlab uses 1
    15         # just check on lenght to avoid dividing by 0
    16         if len(x) <= 1:
    17             sigmahat = np.std(x, 0, ddof=0)
    18         else:
    19             sigmahat = np.std(x, 0, ddof=1)
     15    # numpy defaults to 0 delta degrees of freedom; matlab uses 1
     16        sigmahat = np.std(x, 0, ddof=1)
     17
    2018    # no way to ask this in python, assume 4 outputs
    2119    #if (nargout > 2):
     
    4139            sigmaci[0, :] = sigmahat
    4240            sigmaci[1, :] = sigmahat
    43 
    4441    else:
    4542        #  must loop over columns, since number of elements could be different
     
    5148    #  remove any NaN and recursively call column
    5249        for j in range(np.shape(x, 1)):
    53             #[muhat[j], sigmahat[j], muci[:, j], sigmaci[:, j]] = normfit_issm(x[not np.isnan(x[:, j]), j], alpha)
    54             muhat[j], sigmahat[j], muci[:, j], sigmaci[:, j] = normfit_issm(x[not np.isnan(x[:, j]), j], alpha)
     50            [muhat[j], sigmahat[j], muci[:, j], sigmaci[:, j]] = normfit_issm(x[not np.isnan(x[:, j]), j], alpha)
    5551
    56     return muhat, sigmahat, np.squeeze(muci), np.squeeze(sigmaci)
    57     #return [muhat, sigmahat, muci, sigmaci]
     52    return [muhat, sigmahat, muci, sigmaci]
  • issm/trunk-jpl/src/m/partition/partitioner.py

    r26183 r26184  
    4646        #partitioning essentially happens in 2D. So partition in 2D, then
    4747        #extrude the partition vector vertically.
    48         md3d = copy.deepcopy(md)  # save for later
     48        md3d = copy.deepcopy(md) # save for later
    4949        md.mesh.elements = md.mesh.elements2d
    5050        md.mesh.x = md.mesh.x2d
     
    5858    #adjacency matrix if needed:
    5959    if recomputeadjacency == 'on':
    60         print("adj calll")
    6160        md = adjacency(md)
    6261    else:
     
    6867        else:
    6968            # default method (from chaco.m)
    70             method = np.array([1, 1, 0, 0, 1, 1, 50, 0, 0.001, 7654321])  #.conj().transpose()
     69            method = np.array([1, 1, 0, 0, 1, 1, 50, 0, 0.001, 7654321]).conj().transpose()
    7170            method[0] = 3  #  global method (3 = inertial (geometric))
    7271            method[2] = 0  #  vertex weights (0 = off, 1 = on)
     
    8584
    8685            #  partition into nparts
    87             print("ChacoCall")
    8886            if isinstance(md.mesh, mesh2d):
    89                 print("caseA")
    90                 part = Chaco(md.qmu.adjacency, weights, np.array([]), md.mesh.x, md.mesh.y, np.zeros((md.mesh.numberofvertices)), method, npart, np.array([]))[0].conj().transpose() + 1   #index partitions from 1 up. like metis.
     87                part = Chaco(md.qmu.adjacency, weights, np.array([]), md.mesh.x, md.mesh.y, np.zeros((md.mesh.numberofvertices, 1)), method, npart, np.array([]))[0].conj().transpose() + 1 #index partitions from 1 up. like metis.
    9188            else:
    92                 print("caseB")
    93                 part = Chaco(md.qmu.adjacency, weights, np.array([]), md.mesh.x, md.mesh.y, md.mesh.z, method, npart, np.array([]))[0].conj().transpose() + 1  #index partitions from 1 up. like metis.
    94             print("Done")
    95 
     89                part = Chaco(md.qmu.adjacency, weights, np.array([]), md.mesh.x, md.mesh.y, md.mesh.z, method, npart, np.array([]))[0].conj().transpose() + 1 #index partitions from 1 up. like metis.
    9690    elif package == 'scotch':
    9791        if vectortype == 'element':
     
    9993        else:
    10094            #are we using weights?
    101             if options.getfieldvalue('weighting') == 'on':
     95            if m.strcmpi(options.getfieldvalue('weighting'), 'on'):
    10296                weights = np.floor(md.qmu.vertex_weight / min(md.qmu.vertex_weight))
    10397            else:
     
    10599            maptab = Scotch(md.qmu.adjacency, [], weights, [], 'cmplt', [npart])
    106100
    107             part = maptab[:, 1] + 1  #index partitions from 1 up. like metis.
     101            part = maptab[:, 1] + 1 #index partitions from 1 up. like metis.
    108102
    109103    elif package == 'linear':
     
    122116    else:
    123117        raise RuntimeError('partitioner error message: could not find {} partitioner'.format(package))
    124     print("extrude")
     118
    125119    #extrude if we are in 3D:
    126120    if md.mesh.dimension() == 3:
  • issm/trunk-jpl/src/m/plot/plotmodel.py

    r26182 r26184  
    1414def plotmodel(md, *args):
    1515    '''
    16     PLOTMODEL - At command prompt, type 'plotdoc()' for additional
     16    PLOTMODEL - At command prompt, type 'plotdoc()' for additional 
    1717    documentation.
    1818
     
    3131    subplotwidth = ceil(sqrt(numberofplots))
    3232
    33     # TODO: Check that commenting this out is correct; we do not need a hold
     33    # TODO: Check that commenting this out is correct; we do not need a hold 
    3434    # under matplotlib, right?
    3535    #
     
    4040    if options.list[0].exist('nrows'):
    4141        nrows = options.list[0].getfieldvalue('nrows')
     42        nr = True
    4243    else:
    43         if options.list[0].exist('ncols'):
    44             ncols = options.list[0].getfieldvalue('ncols')
    45             nrows = np.ceil(numberofplots / ncols)
    46         else:
    47             nrows = np.ceil(numberofplots / subplotwidth)
     44        nrows = np.ceil(numberofplots / subplotwidth)
     45        nr = False
    4846
    4947    if options.list[0].exist('ncols'):
    5048        ncols = options.list[0].getfieldvalue('ncols')
    51         nrows = np.ceil(numberofplots / ncols)
     49        nc = True
    5250    else:
    5351        ncols = int(subplotwidth)
     52        nc = False
    5453    ncols = int(ncols)
    5554    nrows = int(nrows)
    5655
     56    #check that nrows and ncols were given at the same time!
     57    if nr != nc:
     58        raise Exception('plotmodel error message: nrows and ncols need to be specified together, or not at all')
     59
    5760    # Go through plots
    5861    #
    59     # NOTE: The following is where Python + matplolib differs substantially in
     62    # NOTE: The following is where Python + matplolib differs substantially in 
    6063    #       implementation and inteface from MATLAB.
    6164    #
     
    8083            plotnum = None
    8184
    82         # NOTE: The inline comments for each of the following parameters are
     85        # NOTE: The inline comments for each of the following parameters are 
    8386        #       taken from https://matplotlib.org/api/_as_gen/mpl_toolkits.axes_grid1.axes_grid.ImageGrid.html
    8487        #
    85         direction = options.list[0].getfieldvalue('direction', 'row')  # {"row", "column"}, default: "row"
    86         axes_pad = options.list[0].getfieldvalue('axes_pad', 0.25)  # float or (float, float), default : 0.02; Padding or (horizonal padding, vertical padding) between axes, in inches
    87         add_all = options.list[0].getfieldvalue('add_all', True)  # bool, default: True
    88         share_all = options.list[0].getfieldvalue('share_all', True)  # bool, default: False
    89         label_mode = options.list[0].getfieldvalue('label_mode', 'L')  # {"L", "1", "all"}, default: "L"; Determines which axes will get tick labels: "L": All axes on the left column get vertical tick labels; all axes on the bottom row get horizontal tick labels;. "1": Only the bottom left axes is labelled. "all": all axes are labelled.
     88        direction = options.list[0].getfieldvalue('direction', 'row') # {"row", "column"}, default: "row"
     89        axes_pad = options.list[0].getfieldvalue('axes_pad', 0.25) # float or (float, float), default : 0.02; Padding or (horizonal padding, vertical padding) between axes, in inches
     90        add_all = options.list[0].getfieldvalue('add_all', True) # bool, default: True
     91        share_all = options.list[0].getfieldvalue('share_all', True) # bool, default: False
     92        label_mode = options.list[0].getfieldvalue('label_mode', 'L') # {"L", "1", "all"}, default: "L"; Determines which axes will get tick labels: "L": All axes on the left column get vertical tick labels; all axes on the bottom row get horizontal tick labels;. "1": Only the bottom left axes is labelled. "all": all axes are labelled.
    9093
    9194        # Translate MATLAB colorbar mode to matplotlib
    9295        #
    9396        # TODO:
    94         # - Add 'edge' option (research if there is a corresponding option in
     97        # - Add 'edge' option (research if there is a corresponding option in 
    9598        #   MATLAB)?
    9699        #
     
    106109            raise RuntimeError('plotmodel error: colorbar mode \'{}\' is not a valid option'.format(colorbar))
    107110
    108         cbar_mode = colorbar   # {"each", "single", "edge", None }, default: None
    109         cbar_location = options.list[0].getfieldvalue('colorbarpos', 'right')   # {"left", "right", "bottom", "top"}, default: "right"
    110         cbar_pad = options.list[0].getfieldvalue('colorbarpad', 0.025)  # float, default: None
    111         cbar_size = options.list[0].getfieldvalue('colorbarsize', '5%')  # size specification (see Size.from_any), default: "5%"
     111        cbar_mode = colorbar # {"each", "single", "edge", None }, default: None
     112        cbar_location = options.list[0].getfieldvalue('colorbarpos', 'right') # {"left", "right", "bottom", "top"}, default: "right"
     113        cbar_pad = options.list[0].getfieldvalue('colorbarpad', 0.025) # float, default: None
     114        cbar_size = options.list[0].getfieldvalue('colorbarsize', '5%') # size specification (see Size.from_any), default: "5%"
    112115
    113116        # NOTE: Second parameter is:
     
    115118        #   rect(float, float, float, float) or int
    116119        #
    117         # The axes position, as a (left, bottom, width, height) tuple or as a
     120        # The axes position, as a (left, bottom, width, height) tuple or as a 
    118121        # three-digit subplot position code (e.g., "121").
    119122        #
  • issm/trunk-jpl/src/m/qmu/dakota_out_parse.py

    r26183 r26184  
    88from helpers import *
    99
    10 #  NOTE: May be rewritten later to take advantage of Python's file I/O
    11 #  mechanics. As it is written now, it is often difficult to work with, but is
    12 #  analagous to the MATLAB version of dakota_out_parse.
     10# NOTE: May be rewritten later to take advantage of Python's file I/O
     11# mechanics. As it is written now, it is often difficult to work with, but is
     12# analagous to the MATLAB version of dakota_out_parse.
    1313
    1414
     
    3232        prcm          (double array, partial rank correlation matrix)
    3333
    34     The filei will be prompted for if empty. The fields of dresp are particular
    35     to the data contained within the file. The scm, pcm, srcm, and prcm are
     34    The filei will be prompted for if empty. The fields of dresp are particular 
     35    to the data contained within the file. The scm, pcm, srcm, and prcm are 
    3636    output by Dakota only for the sampling methods.
    3737
    38     This function reads a Dakota .out output file and parses it into the Python
    39     runtime. It operates in a content-driven fashion, where it skips the
    40     intermediate data and then parses whatever output data it encounters in the
    41     order in which it exists in the file, rather than searching for data based
    42     on the particular method (this makes it independent of method). It also can
     38    This function reads a Dakota .out output file and parses it into the Python 
     39    runtime. It operates in a content-driven fashion, where it skips the 
     40    intermediate data and then parses whatever output data it encounters in the 
     41    order in which it exists in the file, rather than searching for data based 
     42    on the particular method (this makes it independent of method). It also can 
    4343    read and parse the .dat tabular_output file.
    4444
    45     This data would typically be used for plotting and other postprocessing
     45    This data would typically be used for plotting and other postprocessing 
    4646    within MATLAB or Excel.
    4747
    4848    TODO:
    49     - Figure out why output from Dakota is different under MATLAB and Python
     49    - Figure out why output from Dakota is different under MATLAB and Python 
    5050    (is it the input file that we write?)
    5151
    52     "Copyright 2009, by the California Institute of Technology. ALL RIGHTS
    53     RESERVED. United States Government Sponsorship acknowledged. Any commercial
    54     use must be negotiated with the Office of Technology Transfer at the
     52    "Copyright 2009, by the California Institute of Technology. ALL RIGHTS 
     53    RESERVED. United States Government Sponsorship acknowledged. Any commercial 
     54    use must be negotiated with the Office of Technology Transfer at the 
    5555    California Institute of Technology. (NTR 47078)
    5656
    57     This software may be subject to U.S. export control laws. By accepting this
    58     software, the user agrees to comply with all applicable U.S. export laws
    59     and regulations. User has the responsibility to obtain export licenses, or
    60     other export authority as may be required before exporting such information
     57    This software may be subject to U.S. export control laws. By accepting this 
     58    software, the user agrees to comply with all applicable U.S. export laws 
     59    and regulations. User has the responsibility to obtain export licenses, or 
     60    other export authority as may be required before exporting such information 
    6161    to foreign countries or providing access to foreign persons."
    6262    """
     
    7878            raise RuntimeError('File ' + filei + ' is empty')
    7979
    80         dresp = []  # of struct()
     80        dresp = [] # of struct()
    8181        scm = struct()
    8282        pcm = struct()
     
    189189    [ntokens, tokens] = fltokens(fline)
    190190
    191     if fline.startswith('%eval_id interface'): # Dakota versions >= 6
     191    if strncmpi(fline, '%eval_id interface', 18): # Dakota versions >= 6
    192192        offset = 2
    193193    else:  # Dakota versions < 6
     
    223223    print('Number of rows (Dakota func evals) = ' + str(nrow))
    224224
    225     # Calculate statistic
     225    # Calculate statistics
    226226    if (np.size(data, 0) > 1):
    227227        #dmean = mean(data)
     
    229229        [dmean, dstddev, dmeanci, dstddevci] = normfit_issm(data, 0.05)
    230230    else:
    231         dmean = np.zeros((np.size(data, 1)))
    232         dstddev = np.zeros((np.size(data, 1)))
     231        dmean = np.zeros((1, np.size(data, 1)))
     232        dstddev = np.zeros((1, np.size(data, 1)))
    233233        dmeanci = np.zeros((2, np.size(data, 1)))
    234234        dstddevci = np.zeros((2, np.size(data, 1)))
    235235        for i in range(np.size(data, 1)):
    236             dmean[i], dstddev[i], dmeanci[:, i], dstddevci[:, i] = normfit_issm(data[:, i], 0.05)
     236            [dmean[0, i], dstddev[0, i], dmeanci[:, i], dstddevci[:, i]] = normfit_issm(data[:, i], 0.05)
    237237
    238238    dmin = data.min(axis=0)
     
    244244    dmax95 = prctile_issm(data, 95, 0)
    245245
    246     # NOTE: The following line may cause the following warning (should not
    247     # crash or invalidate results) when one of the inputs does not change with
     246    # NOTE: The following line may cause the following warning (should not 
     247    # crash or invalidate results) when one of the inputs does not change with 
    248248    # respect to the other(s), causing an internal divide-by-zero error,
    249249    #
     
    487487            fline = fidi.readline()
    488488            icdf = 0
    489             while (fline != '' and not fline.isspace()) and not fline.startswith('Cumulative Distribution Function'):
     489            while (fline != '' and not fline.isspace()) and not strncmpi(fline, 'Cumulative Distribution Function', 32):
    490490                [ntokens, tokens] = fltokens(fline)
    491491                icdf = icdf + 1
     
    546546            fline = fidi.readline()
    547547            ipdf = 0
    548             while (fline != '' and not fline.isspace()) and not fline.startswith('PDF for'):
     548            while (fline != '' and not fline.isspace()) and not strncmpi(fline, 'PDF for', 7):
    549549                [ntokens, tokens] = fltokens(fline)
    550550                ipdf = ipdf + 1
     
    624624    ndresp = 0
    625625
    626     while fline.startswith('MV Statistics for '):
     626    while (fline != '' and not fline.isspace()) and strncmpi(fline, 'MV Statistics for ', 18):
    627627
    628628        #  add new response function and moments
     
    649649        dresp[-1].sens = []
    650650
    651         while 'Importance Factor for variable' in fline:
     651        while (fline != '' and not fline.isspace()) and strncmpi(fline, '  Importance Factor for variable ', 33):
    652652            [ntokens, tokens] = fltokens(fline)
    653653            idvar = idvar + 1
     
    667667            dresp[-1].impfac = []
    668668            dresp[-1].sens = []
    669             while type(fline) == str and (fline != '' and not fline.isspace()) and 'Cumulative Distribution Function' not in fline and 'MV Statistics for ' not in fline and '---' not in fline:
     669            while type(fline) == str and (fline != '' and not fline.isspace()) and not strncmpi(fline, 'Cumulative Distribution Function', 32) and not strncmpi(fline, 'MV Statistics for ', 18) and not strncmp(fline, ' - ', 1):
    670670                fline = fidi.readline()
    671671
     
    682682                    fline = fidi.readline()
    683683
    684         while fline.startswith('Cumulative Distribution Function'):
     684        while (fline != '' and not fline.isspace()) and strncmpi(fline, 'Cumulative Distribution Function', 32):
    685685            [ntokens, tokens] = fltokens(fline)
    686686
     
    705705            #  read and add cdf table to response function
    706706            fline = fidi.readline()
    707             while (fline != '' and not fline.isspace()) and not fline.startswith('MV Statistics for ') and '---' not in fline:
     707            while (fline != '' and not fline.isspace()) and not strncmpi(fline, 'MV Statistics for ', 18) and not strncmp(fline, ' - ', 1):
    708708                [ntokens, tokens] = fltokens(fline)
    709709                icdf = icdf + 1
     
    724724            print('    Cumulative Distribution Function not available')
    725725            dresp[ndresp].cdf = []
    726             while (fline != '' and not fline.isspace()) and not fline.startswith('MV Statistics for ') and '---' not in fline:
     726            while (fline != '' and not fline.isspace()) and not strncmpi(fline, 'MV Statistics for ', 18) and not strncmp(fline, ' - ', 1):
    727727                fline = fidi.readline()
    728728
     
    737737
    738738    if fline is None or fline == '' or fline.isspace():
    739         fline = findline(fidi, '<<<<< Best ')
     739        fline = findline(fidi, ' < < < < < Best ')
    740740        if fline is None:
    741741            return
     
    747747    print('Reading values for best function evaluation:')
    748748
    749     while fline.startswith('<<<<< Best '):
     749    while (fline != '' and not fline.isspace()) and strncmpi(fline, ' < < < < < Best ', 11):
    750750        [ntokens, tokens] = fltokens(fline)
    751751
    752752    #  read and add best parameter(s)
    753753
    754         if str(tokens[2]).startswith('parameter'):
     754        if strncmpi(str(tokens[2]), 'parameter', 9):
    755755            print('  ' + fline)
    756756
     
    759759            dresp.best.descriptor = ''
    760760
    761             while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
     761            while (fline != '' and not fline.isspace()) and not strncmpi(fline, ' < < < < < Best ', 11):
    762762                [ntokens, tokens] = fltokens(fline)
    763763                dresp.best.param.append([0])
     
    767767
    768768        #  read and add best objective function(s)
    769         elif str(tokens[2]).startswith('objective') and str(tokens[3]).startswith('function'):
     769        elif strncmpi(str(tokens[2]), 'objective', 9) and strncmpi(str(tokens[3]), 'function', 8):
    770770            print('  ' + fline)
    771771
     
    773773            dresp.best.of = []
    774774
    775             while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
     775            while (fline != '' and not fline.isspace()) and not strncmpi(fline, ' < < < < < Best ', 11):
    776776                [ntokens, tokens] = fltokens(fline)
    777777                dresp.best.of.append(0)
     
    780780
    781781        #  read and add best residual norms
    782         elif str(tokens[2]).startswith('residual') and str(tokens[3]).startswith('norm'):
     782        elif strncmpi(str(tokens[2]), 'residual', 8) and strncmpi(str(tokens[3]), 'norm', 4):
    783783            print('  ' + fline)
    784784            dresp.best.norm = tokens[5]
     
    787787            fline = fidi.readline()
    788788
    789             while (fline != '' and not fline.isspace()) and not strncmpi(fline, '<<<<< Best ', 11):
     789            while (fline != '' and not fline.isspace()) and not strncmpi(fline, ' < < < < < Best ', 11):
    790790                fline = fidi.readline()
    791791
    792792        #  read and add best residual term(s)
    793         elif str(tokens[2]).startswith('residual') and str(tokens[3]).startswith('term'):
     793        elif strncmpi(str(tokens[2]), 'residual', 8) and strncmpi(str(tokens[3]), 'term', 4):
    794794            print('  ' + fline)
    795795
     
    797797            dresp.best.res = []
    798798
    799             while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
     799            while (fline != '' and not fline.isspace()) and not strncmpi(fline, '<<<<<Best ', 11):
    800800                [ntokens, tokens] = fltokens(fline)
    801801                dresp.best.res.append(0)
     
    804804
    805805        #  read and add best constraint value(s)
    806         elif str(tokens[2]).startswith('constraint') and str(tokens[3]).startswith('value'):
     806        elif strncmpi(str(tokens[2]), 'constraint', 10) and strncmpi(str(tokens[3]), 'value', 5):
    807807            print('  ' + fline)
    808808
     
    810810            dresp.best.nc = []
    811811
    812             while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
     812            while (fline != '' and not fline.isspace()) and not strncmpi(fline, '<<<<<Best ', 11):
    813813                [ntokens, tokens] = fltokens(fline)
    814814                dresp.best.nc.append(0)
     
    817817
    818818        #  read and add best data captured
    819         elif str(tokens[2]).startswith('data') and str(tokens[3]).startswith('captured'):
     819        elif strncmpi(str(tokens[2]), 'data', 4) and strncmpi(str(tokens[3]), 'captured', 8):
    820820            print('  ' + fline)
    821821            dresp.best.eval = tokens[7]
     
    823823            fline = fidi.readline()
    824824
    825             while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
     825            while (fline != '' and not fline.isspace()) and not strncmpi(fline, ' < < < < < Best ', 11):
    826826                fline = fidi.readline()
    827827
     
    832832            fline = fidi.readline()
    833833
    834             while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
     834            while (fline != '' and not fline.isspace()) and not strncmpi(fline, ' < < < < < Best ', 11):
    835835                fline = fidi.readline()
    836836
     
    903903    for fline in fidi:
    904904        npos += len(fline)
    905         if fline.startswith(string):
     905        if (strncmpi(fline, string, len(string))):
    906906            if goto_line:
    907907                fidi.seek(npos, 0)
  • issm/trunk-jpl/src/m/solve/parseresultsfromdisk.py

    r26183 r26184  
    33import numpy as np
    44from results import solution
    5 from netCDF4 import Dataset
    65
    76
     
    7675# }}}
    7776
    78 
    7977def parseresultsfromdiskioserial(md, filename):  # {{{
    8078    # Open file
    8179    try:
    8280        fid = open(filename, 'rb')
    83     except IOError:
     81    except IOError as e:
    8482        raise IOError("parseresultsfromdisk error message: could not open {} for binary reading".format(filename))
    85     print(md.results)
     83
    8684    # Collect all results in a list
    8785    allresults = []
     
    8987        # Read next result
    9088        result = ReadData(fid, md)
     89
    9190        if result is None:
    9291            if allresults == []:
     
    9493            else:
    9594                break
    96         # print("now got {} for time {}".format(result['fieldname'], result['time']))
    97         # print("with keys {}".format(result.keys()))
    98         # print("==============================")
    9995
    10096        allresults.append(result)
     
    123119    return results
    124120# }}}
    125 
    126 
    127 def parseresultsfromdisktonc(md, filin, filout):  # {{{
    128     verbose = 0
    129     # Open file
    130     try:
    131         fid = open(filin, 'rb')
    132     except IOError:
    133         raise IOError("parseresultsfromdisk error message: could not open {} for binary reading".format(filin))
    134 
    135     NCData = Dataset(filout, 'w', format='NETCDF4')
    136     UnlimDim = NCData.createDimension('Unlim', None)
    137     DimDict = {len(UnlimDim): 'Inf'}
    138     dimindex = 2
    139     dimlist = [2, md.mesh.numberofelements, md.mesh.numberofvertices, np.shape(md.mesh.elements)[1]]
    140     dimnames = ['DictDummy', 'EltNum', 'VertNum', 'VertPerElt']
    141     if verbose > 0:
    142         print('===Creating dimensions ===')
    143     for i, newdim in enumerate(dimlist):
    144         if newdim not in list(DimDict.keys()):
    145             dimindex += 1
    146             NewDim = NCData.createDimension(dimnames[i], newdim)
    147             DimDict[len(NewDim)] = dimnames[i]
    148 
    149     typelist = [bool, str, int, float, complex,
    150                 collections.OrderedDict,
    151                 np.int64, np.ndarray, np.float64]
    152 
    153     # Collect all results in a list
    154     allresults = []
    155     while True:
    156         # Read next result
    157         result = ReadData(fid, md)
    158         if result is None:
    159             if allresults == []:
    160                 raise Exception('no results found in binary file ' + filename)
    161             else:
    162                 break
    163         print("now got {} for time {}".format(result['fieldname'], result['time']))
    164         print("with keys {}".format(result.keys()))
    165         print("==============================")
    166 
    167         allresults.append(result)
    168     fid.close()
    169 
    170     # Now, process all results and find out how many steps we have
    171     numresults = len(allresults)
    172     allsteps = np.zeros((numresults, 1))
    173     for i in range(numresults):
    174         allsteps[i] = allresults[i]['step']
    175     pos = np.where(allsteps != -9999)
    176     allsteps = np.sort(np.unique(allsteps[pos]))
    177 
    178     # Ok, now construct structure
    179     results = solution()
    180 
    181     for i in range(numresults):
    182         result = allresults[i]
    183         index = 0
    184         if result['step'] != -9999:
    185             index = np.where(result['step'] == allsteps)[0][0]
    186             setattr(results[index], 'step', result['step'])
    187         if result['time'] != -9999:
    188             setattr(results[index], 'time', result['time'])
    189         setattr(results[index], result['fieldname'], result['field'])
    190     return results
    191 # }}}
    192 
    193121
    194122def ReadData(fid, md):  # {{{
     
    393321# }}}
    394322
    395 
    396323def addfieldtorecord(a, descr): #{{{
    397324    if a.dtype.fields is None:
Note: See TracChangeset for help on using the changeset viewer.