Changeset 26183
- Timestamp: 04/08/21 03:57:34 (4 years ago)
- Location: issm/trunk-jpl/src/m
- Files: 3 deleted, 12 edited
issm/trunk-jpl/src/m/classes/clusters/generic.py
r25758 → r26183

      if IssmConfig('_HAVE_MPI_')[0]:
-         fid.write('mpiexec -np {} {} --leak-check=full {} {}/{} {} {}/{} {} 2> {}.errlog > {}.outlog '.
-             format(self.np, self.valgrind, supstring, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
+         fid.write('mpiexec -np {} {} --leak-check=full {} {}/{} {} {}/{} {} 2>{}.errlog>{}.outlog '.format(self.np, self.valgrind, supstring, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
+         #fid.write('mpiexec -np {} {} --tool=callgrind {}/{} {} {}/{} {} 2>{}.errlog>{}.outlog '.format(self.np, self.valgrind, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
      else:
          fid.write('{} --leak-check=full {} {}/{} {} {}/{} {} 2> {}.errlog > {}.outlog '.
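For reference, a minimal sketch of how the rewritten single-line format call expands. All values below are hypothetical placeholders, not taken from a real ISSM configuration; the functional change is only that the spaces around the 2> and > redirections were dropped, which the shell parses identically:

    # Hypothetical stand-ins for the self.* attributes used in generic.py:
    np_ = 4
    valgrind = '/usr/bin/valgrind'
    supstring = '--suppressions=supp.sup'
    codepath = '/opt/issm/bin'
    executable = 'issm.exe'
    solution = 'StressbalanceSolution'
    executionpath = '/tmp/run'
    dirname = 'run-01'
    modelname = 'mymodel'

    line = 'mpiexec -np {} {} --leak-check=full {} {}/{} {} {}/{} {} 2>{}.errlog>{}.outlog '.format(
        np_, valgrind, supstring, codepath, executable, solution,
        executionpath, dirname, modelname, modelname, modelname)
    print(line)
    # mpiexec -np 4 /usr/bin/valgrind --leak-check=full --suppressions=supp.sup
    #   /opt/issm/bin/issm.exe StressbalanceSolution /tmp/run/run-01 mymodel
    #   2>mymodel.errlog>mymodel.outlog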
issm/trunk-jpl/src/m/classes/model.py
r26059 → r26183

      from generic import generic
      from pfe import pfe
-     from vilje import vilje
-     from hexagon import hexagon
      from cyclone import cyclone
-     from stallo import stallo
      from saga import saga
      from balancethickness import balancethickness
…
      def __repr__(obj): #{{{
          # TODO:
          # - Convert all formatting to calls to <string>.format (see any
          #   already converted <class>.__repr__ method for examples)
          [trailing whitespace trimmed in this comment]
…
          md.hydrology.__dict__[field] = project2d(md, md.hydrology.__dict__[field], 1)

-     # Materials
      md.materials.rheology_B = DepthAverage(md, md.materials.rheology_B)
      md.materials.rheology_n = project2d(md, md.materials.rheology_n, 1)
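The last hunk sits in the 2-D collapse path, which depth-averages md.materials.rheology_B and projects rheology_n onto the base mesh. As a generic illustration of what depth-averaging an extruded field means (this is not ISSM's DepthAverage, which additionally weights by layer spacing):

    import numpy as np

    def depth_average(field_3d, numlayers):
        """Hypothetical sketch: average a vertex field stored layer by layer
        on an extruded mesh back onto the 2-D base mesh. Assumes the 3-D
        vector stacks the 2-D vertices one layer at a time."""
        nv2d = field_3d.size // numlayers
        return field_3d.reshape(numlayers, nv2d).mean(axis=0)

    # 3 layers of 4 vertices, values increasing with depth
    field = np.array([0., 0., 0., 0., 1., 1., 1., 1., 2., 2., 2., 2.])
    print(depth_average(field, 3))  # [1. 1. 1. 1.]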
issm/trunk-jpl/src/m/coordsystems/gmtmask.m
r25860 → r26183

      %are we doing a recursive call?
      if nargin==3,
          recursive=1;
      else
          recursive=0;
      end

      if(recursive)disp(sprintf('   recursing: num vertices %i',length(lat)));
      else disp(sprintf('gmtmask: num vertices %i',length(lat)));
      end

      %Check lat and long size is not more than 50,000; If so, recursively call gmtmask:
      if length(lat)>50000,
          for i=1:50000:length(lat),
…
          return
      end

      %First, write our lat,long file for gmt:
      nv=length(lat);
      filename_all = ['all_vertices-' num2str(feature('GetPid')) '.txt'];
      filename_oce = ['oce_vertices-' num2str(feature('GetPid')) '.txt'];
      dlmwrite(filename_all,[long lat (1:nv)'],'delimiter','\t','precision',10);

+     %Avoid bypassing of the ld library path by Matlab (:()
+     %
+     % TODO: Do we really need this (we can/already set it in etc/environment.sh)?
+     if ismac,
+         dyld_library_path_old=getenv('DYLD_LIBRARY_PATH');
+         setenv('DYLD_LIBRARY_PATH',[ issmdir '/externalpackages/curl/install/lib:' issmdir '/externalpackages/hdf5/install/lib:' issmdir '/externalpackages/netcdf/install/lib' ]);
+     end
+
+     %Find path to gmt, list all possible known paths to gmt (you may need to add yours to the list)
+     gmtpaths = {[issmdir '/bin/gmt'],[issmdir '/externalpackages/gmt/install/bin/gmt'],'/Applications/GMT-5.4.3.app/Contents/Resources/bin/gmt','/usr/bin/gmt'};
+     gmtpath = '';
+     for i=gmtpaths
+         if exist(i{1},'file'),
+             gmtpath = i{1};
+             break;
+         end
+     end
+     if isempty(gmtpath),
+         error('gmt not found! Make sure it is properly installed, or add its path to this file.');
+     end

      %figure out which vertices are on the ocean, which one on the continent:
…
      end

+     %reset DYLD_LIBRARY_PATH to what it was:
+     if ismac,
+         setenv('DYLD_LIBRARY_PATH',dyld_library_path_old);
+     end
      %read the con_vertices.txt file and flag our mesh vertices on the continent
      fid=fopen(['./' filename_oce],'r');
      line=fgets(fid);
      line=fgets(fid);
      oce_vertices=[];
…
      mask=zeros(nv,1);
      mask(oce_vertices)=1;

      system(['rm -rf ./' filename_all ' ./' filename_oce ' ./gmt.history']);
-     if ~recursive, disp(sprintf('gmtmask: done')); end;
+     if ~recursive,
+         disp(sprintf('gmtmask: done'));
+     end

      [plus several trailing-whitespace trims throughout the file]
issm/trunk-jpl/src/m/coordsystems/gmtmask.py
r25860 → r26183

      np.savetxt('./all_vertices.txt', np.transpose([long, lat, np.arange(1, nv + 1)]), delimiter='\t', fmt='%.10f')

+     #Avoid bypassing of the ld library path by Matlab (:()
+     issm_dir = issmdir()
+
+     try:
+         ismac
+     except NameError:
+         ismac = False
+
+     # TODO: Do we really need this (we can/already set it in etc/environment.sh)?
+     if ismac:
+         dyld_library_path_old = os.getenv('DYLD_LIBRARY_PATH')
+         os.putenv('DYLD_LIBRARY_PATH', issm_dir + '/externalpackages/curl/install/lib:' + issm_dir + '/externalpackages/hdf5/install/lib:' + issm_dir + '/externalpackages/netcdf/install/lib')
+
+     #Find path to gmt (you may need to add yours to the list).
+     #
+     # NOTE: Assumes gmtselect is also in this directory.
+     #
+     gmtpaths = ['/usr/bin/gmt',
+                 issm_dir + '/bin/gmt',
+                 issm_dir + '/externalpackages/gmt/install/bin/gmt',
+                 '/Applications/GMT-5.4.3.app/Contents/Resources/bin/gmt']
+     gmtpath = ''
+     for i in range(len(gmtpaths)):
+         if os.path.isfile(gmtpaths[i]):
+             gmtpath = gmtpaths[i]
+             break
+
+     if gmtpath == '':
+         raise Exception('gmt not found! Make sure it is properly installed, or add its path to this file.')
+
      #figure out which vertices are on the ocean, which one on the continent:
      subprocess.call('gmtselect ./all_vertices.txt -h0 -Df -R0/360/-90/90 -A0 -JQ180/200 -Nk/s/s/k/s > ./oce_vertices.txt', shell=True)
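The new block probes a fixed list of candidate gmt locations and stops at the first hit. A compact sketch of the same idea, with the candidate list mirroring the one committed above; the shutil.which() fallback is an extra convenience for illustration, not part of the committed code:

    import os
    import shutil

    def find_gmt(issm_dir):
        """Return the first existing gmt binary from a list of known
        locations. Falls back to searching PATH (an assumption, not in
        the changeset) before giving up."""
        candidates = [
            '/usr/bin/gmt',
            issm_dir + '/bin/gmt',
            issm_dir + '/externalpackages/gmt/install/bin/gmt',
            '/Applications/GMT-5.4.3.app/Contents/Resources/bin/gmt',
        ]
        for path in candidates:
            if os.path.isfile(path):
                return path
        which = shutil.which('gmt')  # search PATH as a last resort
        if which:
            return which
        raise Exception('gmt not found! Make sure it is properly installed, '
                        'or add its path to this file.')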
issm/trunk-jpl/src/m/dev/devpath.m
r26125 → r26183

      % clear the last warning to focus on the warnings of the ISSM path
      lastwarn('');

      %Recover ISSM_DIR, or if on a Windows machine, ISSM_DIR_WIN
…
      else
          ISSM_DIR=getenv('ISSM_DIR');
+         ISSM_DEV_DIR=getenv('ISSM_DEV_DIR');
      end
…
      %Now add all issm code paths necessary to run issm smoothly.
      %We capture the error output, so that we can warn the user to update
      %the variable ISSM_DIR in this file, in case it is not correctly setup.

      %ISSM path
-     addpath([ISSM_DIR '/src/m/os/']); %load recursivepath
-     addpath([ISSM_DIR '/lib']); %load MEX files
-     addpath(recursivepath([ISSM_DIR '/src/m']));
+     addpath([ISSM_DEV_DIR '/src/m/os/']); %load recursivepath
+     addpath([ISSM_DEV_DIR '/lib']); %load mex
+     addpath(recursivepath([ISSM_DEV_DIR '/src/m']));
      addpath(recursivepath([ISSM_DIR '/externalpackages/scotch']));
      addpath(recursivepath([ISSM_DIR '/externalpackages/canos']));
…
      clear ISSM_DIR;

      %Check on any warning messages that might indicate that the paths were not correct.
      if ~isempty(lastwarn),
          fprintf('\n Error trying to setup ''ISSM'' code paths. Try and update the ISSM_DIR variable in your .cshrc or .bashrc!\n');

      [plus several trailing-whitespace trims]
issm/trunk-jpl/src/m/dev/devpath.py
r25684 → r26183

      #Recover ISSM_DIR and USERNAME
      ISSM_DIR = os.getenv('ISSM_DIR')
+     ISSM_DEV_DIR = os.getenv('ISSM_DEV_DIR')
      USERNAME = os.getenv('USER')
      JPL_SVN = os.getenv('JPL_SVN')
…
      #Go through src/m and append any directory that contains a *.py file to PATH
-     for root, dirs, files in os.walk(ISSM_DIR + '/src/m'):
+     for root, dirs, files in os.walk(ISSM_DEV_DIR + '/src/m'):
          if '.svn' in dirs:
              dirs.remove('.svn')
…
      #Also add the Nightly run directory
-     if ISSM_DIR + '/test/NightlyRun' not in sys.path:
-         sys.path.append(ISSM_DIR + '/test/NightlyRun')
-     if ISSM_DIR + '/lib' not in sys.path:
-         sys.path.append(ISSM_DIR + '/lib')
-     if ISSM_DIR + '/src/wrappers/python/.libs' not in sys.path:
-         sys.path.append(ISSM_DIR + '/src/wrappers/python/.libs')
+     if ISSM_DEV_DIR + '/test/NightlyRun' not in sys.path:
+         sys.path.append(ISSM_DEV_DIR + '/test/NightlyRun')
+     if ISSM_DEV_DIR + '/lib' not in sys.path:
+         sys.path.append(ISSM_DEV_DIR + '/lib')
+     if ISSM_DEV_DIR + '/src/wrappers/python/.libs' not in sys.path:
+         sys.path.append(ISSM_DEV_DIR + '/src/wrappers/python/.libs')

      # If using clusters, we need to have the path to the cluster settings directory
…
      print("\n ISSM development path correctly loaded")
-     print("Current path is {}\n\n".format(ISSM_DIR))
+     print("Current path is {}\n\n".format(ISSM_DEV_DIR))
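Both devpath scripts now read ISSM_DEV_DIR alongside ISSM_DIR. The committed code assumes the variable is set and would fail later with a TypeError when concatenating None; a small defensive sketch (the up-front check is an addition for illustration, not in the changeset) that fails early with a clear message:

    import os
    import sys

    ISSM_DIR = os.getenv('ISSM_DIR')
    ISSM_DEV_DIR = os.getenv('ISSM_DEV_DIR')
    if ISSM_DEV_DIR is None:
        # Hypothetical guard: give a readable error instead of a TypeError
        sys.exit('ISSM_DEV_DIR is not set: export it in your .bashrc/.cshrc '
                 'before loading the development path')

    for subdir in ('/test/NightlyRun', '/lib', '/src/wrappers/python/.libs'):
        path = ISSM_DEV_DIR + subdir
        if path not in sys.path:
            sys.path.append(path)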
issm/trunk-jpl/src/m/mesh/planet/gmsh/gmshplanet.m
r25860 → r26183

      %   md.mesh=gmshplanet('radius',6000,'resolution',100);
      %   md.mesh=gmshplanet('radius',6000,'resolution',100);

+     %Find path to gmsh
+     paths = {[issmdir() 'bin/gmsh'],...
+         [issmdir() 'externalpackages/gmsh/install/gmsh'],...
+         ['/usr/bin/gmsh']...
+     };
+     disp(paths{1})
+     gmshpath = '';
+     for i=paths
+         if exist(i{1},'file'),
+             gmshpath = i{1};
+             break;
+         end
+     end
+     if isempty(gmshpath),
+         error('gmsh not found, make sure it is properly installed');
+     end

      % Get Gmsh version
…
      % Call gmsh
      %
      % NOTE: The default format in Gmsh 3 is "msh2". Rather than conditionally
      %       modifying our parsing scheme for Gmsh 4, for now, we simply set the
      %       "-format" option.
      %
      [trailing whitespace trimmed in the NOTE comment]
issm/trunk-jpl/src/m/mesh/planet/gmsh/gmshplanet.py
r25344 → r26183

      subproc = subprocess.Popen(subproc_args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      outs, errs = subproc.communicate()
-     if errs != '':
+     if errs.decode('UTF-8') != '':
          raise Exception("gmshplanet: call to gmsh failed: {}".format(errs))
      gmshmajorversion = int(outs)
…
      # Call gmsh
      #
      # NOTE: The default format in Gmsh 3 is "msh2". Rather than conditionally
      #       modifying our parsing scheme for Gmsh 4, for now, we simply set the
      #       "-format" option.
      #
      [trailing whitespace trimmed in the NOTE comment]
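The decode fix matters because Popen.communicate() returns bytes in Python 3, and bytes never compare equal to a str: b'' != '' is True, so the old check raised even when gmsh wrote nothing to stderr. A minimal reproduction (echo stands in for the gmsh version call):

    import subprocess

    proc = subprocess.Popen('echo 4', shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    outs, errs = proc.communicate()

    print(errs != '')                   # True  -- bytes never equal str
    print(errs.decode('UTF-8') != '')   # False -- correct: stderr was empty

    gmshmajorversion = int(outs)        # int() tolerates b'4\n' directly
    print(gmshmajorversion)             # 4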
issm/trunk-jpl/src/m/miscellaneous/normfit_issm.py
r24593 → r26183

      # check for any NaN in any columns
      if not np.isnan(x).any():
-
          # explicitly calculate the moments
          muhat = np.mean(x, 0)
-         # numpy defaults to 0 delta degrees of freedom; matlab uses 1
-         sigmahat = np.std(x, 0, ddof=1)
-
+         # numpy defaults to 0 delta degrees of freedom; matlab uses 1
+         # just check on length to avoid dividing by 0
+         if len(x) <= 1:
+             sigmahat = np.std(x, 0, ddof=0)
+         else:
+             sigmahat = np.std(x, 0, ddof=1)
          # no way to ask this in python, assume 4 outputs
          #if (nargout > 2):
…
          sigmaci[0, :] = sigmahat
          sigmaci[1, :] = sigmahat
+
      else:
          # must loop over columns, since number of elements could be different
…
          # remove any NaN and recursively call column
          for j in range(np.shape(x, 1)):
-             [muhat[j], sigmahat[j], muci[:, j], sigmaci[:, j]] = normfit_issm(x[not np.isnan(x[:, j]), j], alpha)
+             #[muhat[j], sigmahat[j], muci[:, j], sigmaci[:, j]] = normfit_issm(x[not np.isnan(x[:, j]), j], alpha)
+             muhat[j], sigmahat[j], muci[:, j], sigmaci[:, j] = normfit_issm(x[not np.isnan(x[:, j]), j], alpha)

-     return [muhat, sigmahat, muci, sigmaci]
+     return muhat, sigmahat, np.squeeze(muci), np.squeeze(sigmaci)
+     #return [muhat, sigmahat, muci, sigmaci]
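The new length guard exists because np.std with ddof=1 (the MATLAB-style sample standard deviation) divides by len(x) - 1, which is zero for a single sample. A minimal demonstration of the failure mode and the committed fallback:

    import numpy as np

    x = np.array([3.14])

    # ddof=1 divides by len(x)-1 == 0, so numpy emits a RuntimeWarning
    # ("Degrees of freedom <= 0 for slice") and returns nan:
    print(np.std(x, 0, ddof=1))   # nan

    # The guard added in this changeset falls back to ddof=0 for one sample:
    if len(x) <= 1:
        sigmahat = np.std(x, 0, ddof=0)   # population std: well defined
    else:
        sigmahat = np.std(x, 0, ddof=1)
    print(sigmahat)   # 0.0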
issm/trunk-jpl/src/m/partition/partitioner.py
r25165 → r26183

      #partitioning essentially happens in 2D. So partition in 2D, then
      #extrude the partition vector vertically.
      md3d = copy.deepcopy(md)  # save for later   [comment spacing tweaked]
      md.mesh.elements = md.mesh.elements2d
      md.mesh.x = md.mesh.x2d
…
      #adjacency matrix if needed:
      if recomputeadjacency == 'on':
+         print("adj calll")
          md = adjacency(md)
      else:
…
      else:
          # default method (from chaco.m)
-         method = np.array([1, 1, 0, 0, 1, 1, 50, 0, 0.001, 7654321]).conj().transpose()
+         method = np.array([1, 1, 0, 0, 1, 1, 50, 0, 0.001, 7654321])  #.conj().transpose()
          method[0] = 3  # global method (3 = inertial (geometric))
          method[2] = 0  # vertex weights (0 = off, 1 = on)
…
      # partition into nparts
+     print("ChacoCall")
      if isinstance(md.mesh, mesh2d):
+         print("caseA")
-         part = Chaco(md.qmu.adjacency, weights, np.array([]), md.mesh.x, md.mesh.y, np.zeros((md.mesh.numberofvertices, 1)), method, npart, np.array([]))[0].conj().transpose() + 1  #index partitions from 1 up. like metis.
+         part = Chaco(md.qmu.adjacency, weights, np.array([]), md.mesh.x, md.mesh.y, np.zeros((md.mesh.numberofvertices)), method, npart, np.array([]))[0].conj().transpose() + 1  #index partitions from 1 up. like metis.
      else:
+         print("caseB")
          part = Chaco(md.qmu.adjacency, weights, np.array([]), md.mesh.x, md.mesh.y, md.mesh.z, method, npart, np.array([]))[0].conj().transpose() + 1  #index partitions from 1 up. like metis.
+     print("Done")

  elif package == 'scotch':
      if vectortype == 'element':
…
      else:
          #are we using weights?
-         if m.strcmpi(options.getfieldvalue('weighting'), 'on'):
+         if options.getfieldvalue('weighting') == 'on':
              weights = np.floor(md.qmu.vertex_weight / min(md.qmu.vertex_weight))
          else:
…
          maptab = Scotch(md.qmu.adjacency, [], weights, [], 'cmplt', [npart])

          part = maptab[:, 1] + 1  #index partitions from 1 up. like metis.

  elif package == 'linear':
…
      else:
          raise RuntimeError('partitioner error message: could not find {} partitioner'.format(package))
+     print("extrude")
      #extrude if we are in 3D:
      if md.mesh.dimension() == 3:
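The mesh2d branch now passes np.zeros((md.mesh.numberofvertices)), a 1-D vector, instead of the (n, 1) column matrix, matching the shape of md.mesh.x and md.mesh.y. A short sketch of the difference the Chaco wrapper sees (n is a stand-in for md.mesh.numberofvertices):

    import numpy as np

    n = 5  # stand-in for md.mesh.numberofvertices

    z_col = np.zeros((n, 1))   # old: 2-D column, shape (5, 1)
    z_vec = np.zeros((n))      # new: 1-D vector, shape (5,), like mesh.x/y

    print(z_col.shape, z_vec.shape)   # (5, 1) (5,)
    print(z_col.ndim, z_vec.ndim)     # 2 1
    # A binding that expects flat coordinate arrays can choke on the extra
    # axis; flattening shows both hold the same data:
    print(np.array_equal(z_col.ravel(), z_vec))   # True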
issm/trunk-jpl/src/m/qmu/dakota_out_parse.py
r25726 → r26183

      from helpers import *

      # NOTE: May be rewritten later to take advantage of Python's file I/O
      # mechanics. As it is written now, it is often difficult to work with, but is
      # analogous to the MATLAB version of dakota_out_parse.
…
          prcm (double array, partial rank correlation matrix)

      The filei will be prompted for if empty. The fields of dresp are particular
      to the data contained within the file. The scm, pcm, srcm, and prcm are
      output by Dakota only for the sampling methods.

      This function reads a Dakota .out output file and parses it into the Python
      runtime. It operates in a content-driven fashion, where it skips the
      intermediate data and then parses whatever output data it encounters in the
      order in which it exists in the file, rather than searching for data based
      on the particular method (this makes it independent of method). It also can
      read and parse the .dat tabular_output file.

      This data would typically be used for plotting and other postprocessing
      within MATLAB or Excel.

      TODO:
      - Figure out why output from Dakota is different under MATLAB and Python
        (is it the input file that we write?)

      "Copyright 2009, by the California Institute of Technology. ALL RIGHTS
      RESERVED. United States Government Sponsorship acknowledged. Any commercial
      use must be negotiated with the Office of Technology Transfer at the
      California Institute of Technology. (NTR 47078)

      This software may be subject to U.S. export control laws. By accepting this
      software, the user agrees to comply with all applicable U.S. export laws
      and regulations. User has the responsibility to obtain export licenses, or
      other export authority as may be required before exporting such information
      to foreign countries or providing access to foreign persons."
      """

      [the NOTE and docstring above are unchanged apart from trailing-whitespace trims]
…
          raise RuntimeError('File ' + filei + ' is empty')

      dresp = []  # of struct()
      scm = struct()
      pcm = struct()
…
      [ntokens, tokens] = fltokens(fline)

-     if strncmpi(fline, '%eval_id interface', 18):  # Dakota versions >= 6
+     if fline.startswith('%eval_id interface'):  # Dakota versions >= 6
          offset = 2
      else:  # Dakota versions < 6
…
      print('Number of rows (Dakota func evals) = ' + str(nrow))

      # Calculate statistics
      if (np.size(data, 0) > 1):
          #dmean = mean(data)
…
          [dmean, dstddev, dmeanci, dstddevci] = normfit_issm(data, 0.05)
      else:
-         dmean = np.zeros((1, np.size(data, 1)))
-         dstddev = np.zeros((1, np.size(data, 1)))
+         dmean = np.zeros((np.size(data, 1)))
+         dstddev = np.zeros((np.size(data, 1)))
          dmeanci = np.zeros((2, np.size(data, 1)))
          dstddevci = np.zeros((2, np.size(data, 1)))
          for i in range(np.size(data, 1)):
-             [dmean[0, i], dstddev[0, i], dmeanci[:, i], dstddevci[:, i]] = normfit_issm(data[:, i], 0.05)
+             dmean[i], dstddev[i], dmeanci[:, i], dstddevci[:, i] = normfit_issm(data[:, i], 0.05)

      dmin = data.min(axis=0)
…
      dmax95 = prctile_issm(data, 95, 0)

      # NOTE: The following line may cause the following warning (should not
      # crash or invalidate results) when one of the inputs does not change with
      # respect to the other(s), causing an internal divide-by-zero error,
…
      fline = fidi.readline()
      icdf = 0
-     while (fline != '' and not fline.isspace()) and not strncmpi(fline, 'Cumulative Distribution Function', 32):
+     while (fline != '' and not fline.isspace()) and not fline.startswith('Cumulative Distribution Function'):
          [ntokens, tokens] = fltokens(fline)
          icdf = icdf + 1
…
      fline = fidi.readline()
      ipdf = 0
-     while (fline != '' and not fline.isspace()) and not strncmpi(fline, 'PDF for', 7):
+     while (fline != '' and not fline.isspace()) and not fline.startswith('PDF for'):
          [ntokens, tokens] = fltokens(fline)
          ipdf = ipdf + 1
…
      ndresp = 0

-     while (fline != '' and not fline.isspace()) and strncmpi(fline, 'MV Statistics for ', 18):
+     while fline.startswith('MV Statistics for '):

          # add new response function and moments
…
          dresp[-1].sens = []

-         while (fline != '' and not fline.isspace()) and strncmpi(fline, ' Importance Factor for variable ', 33):
+         while 'Importance Factor for variable' in fline:
              [ntokens, tokens] = fltokens(fline)
              idvar = idvar + 1
…
              dresp[-1].impfac = []
              dresp[-1].sens = []
-             while type(fline) == str and (fline != '' and not fline.isspace()) and not strncmpi(fline, 'Cumulative Distribution Function', 32) and not strncmpi(fline, 'MV Statistics for ', 18) and not strncmp(fline, '-', 1):
+             while type(fline) == str and (fline != '' and not fline.isspace()) and 'Cumulative Distribution Function' not in fline and 'MV Statistics for ' not in fline and '---' not in fline:
                  fline = fidi.readline()
…
          fline = fidi.readline()

-         while (fline != '' and not fline.isspace()) and strncmpi(fline, 'Cumulative Distribution Function', 32):
+         while fline.startswith('Cumulative Distribution Function'):
              [ntokens, tokens] = fltokens(fline)
…
              # read and add cdf table to response function
              fline = fidi.readline()
-             while (fline != '' and not fline.isspace()) and not strncmpi(fline, 'MV Statistics for ', 18) and not strncmp(fline, '-', 1):
+             while (fline != '' and not fline.isspace()) and not fline.startswith('MV Statistics for ') and '---' not in fline:
                  [ntokens, tokens] = fltokens(fline)
                  icdf = icdf + 1
…
              print('   Cumulative Distribution Function not available')
              dresp[ndresp].cdf = []
-             while (fline != '' and not fline.isspace()) and not strncmpi(fline, 'MV Statistics for ', 18) and not strncmp(fline, '-', 1):
+             while (fline != '' and not fline.isspace()) and not fline.startswith('MV Statistics for ') and '---' not in fline:
                  fline = fidi.readline()
…
      if fline is None or fline == '' or fline.isspace():
-         fline = findline(fidi, '<<<<< Best ')
+         fline = findline(fidi, '<<<<< Best ')
          if fline is None:
              return
…
      print('Reading values for best function evaluation:')

-     while (fline != '' and not fline.isspace()) and strncmpi(fline, '<<<<< Best ', 11):
+     while fline.startswith('<<<<< Best '):
          [ntokens, tokens] = fltokens(fline)

          # read and add best parameter(s)

-         if strncmpi(str(tokens[2]), 'parameter', 9):
+         if str(tokens[2]).startswith('parameter'):
              print('  ' + fline)
…
              dresp.best.descriptor = ''

-             while (fline != '' and not fline.isspace()) and not strncmpi(fline, '<<<<< Best ', 11):
+             while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
                  [ntokens, tokens] = fltokens(fline)
                  dresp.best.param.append([0])
…
          # read and add best objective function(s)
-         elif strncmpi(str(tokens[2]), 'objective', 9) and strncmpi(str(tokens[3]), 'function', 8):
+         elif str(tokens[2]).startswith('objective') and str(tokens[3]).startswith('function'):
              print('  ' + fline)

              dresp.best.of = []

-             while (fline != '' and not fline.isspace()) and not strncmpi(fline, '<<<<< Best ', 11):
+             while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
                  [ntokens, tokens] = fltokens(fline)
                  dresp.best.of.append(0)
…
          # read and add best residual norms
-         elif strncmpi(str(tokens[2]), 'residual', 8) and strncmpi(str(tokens[3]), 'norm', 4):
+         elif str(tokens[2]).startswith('residual') and str(tokens[3]).startswith('norm'):
              print('  ' + fline)
              dresp.best.norm = tokens[5]
…
              fline = fidi.readline()

              while (fline != '' and not fline.isspace()) and not strncmpi(fline, '<<<<< Best ', 11):
                  fline = fidi.readline()

          # read and add best residual term(s)
-         elif strncmpi(str(tokens[2]), 'residual', 8) and strncmpi(str(tokens[3]), 'term', 4):
+         elif str(tokens[2]).startswith('residual') and str(tokens[3]).startswith('term'):
              print('  ' + fline)

              dresp.best.res = []

-             while (fline != '' and not fline.isspace()) and not strncmpi(fline, '<<<<< Best ', 11):
+             while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
                  [ntokens, tokens] = fltokens(fline)
                  dresp.best.res.append(0)
…
          # read and add best constraint value(s)
-         elif strncmpi(str(tokens[2]), 'constraint', 10) and strncmpi(str(tokens[3]), 'value', 5):
+         elif str(tokens[2]).startswith('constraint') and str(tokens[3]).startswith('value'):
              print('  ' + fline)

              dresp.best.nc = []

-             while (fline != '' and not fline.isspace()) and not strncmpi(fline, '<<<<< Best ', 11):
+             while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
                  [ntokens, tokens] = fltokens(fline)
                  dresp.best.nc.append(0)
…
          # read and add best data captured
-         elif strncmpi(str(tokens[2]), 'data', 4) and strncmpi(str(tokens[3]), 'captured', 8):
+         elif str(tokens[2]).startswith('data') and str(tokens[3]).startswith('captured'):
              print('  ' + fline)
              dresp.best.eval = tokens[7]

              fline = fidi.readline()

-             while (fline != '' and not fline.isspace()) and not strncmpi(fline, '<<<<< Best ', 11):
+             while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
                  fline = fidi.readline()
…
              fline = fidi.readline()

-             while (fline != '' and not fline.isspace()) and not strncmpi(fline, '<<<<< Best ', 11):
+             while (fline != '' and not fline.isspace()) and not fline.startswith('<<<<< Best '):
                  fline = fidi.readline()
…
      for fline in fidi:
          npos += len(fline)
-         if (strncmpi(fline, string, len(string))):
+         if fline.startswith(string):
              if goto_line:
                  fidi.seek(npos, 0)
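Most of this diff replaces the MATLAB-compatibility helper strncmpi with native string methods. One subtlety worth noting: strncmpi compares the first n characters case-insensitively, while str.startswith is case-sensitive, which is safe here only because Dakota's markers have fixed capitalization (an assumption about Dakota's output, not stated in the changeset). A short contrast:

    # strncmpi(a, b, n) compares the first n characters case-insensitively,
    # mirroring the MATLAB builtin this code used via helpers.py:
    def strncmpi(a, b, n):
        return a[:n].lower() == b[:n].lower()

    fline = '<<<<< Best parameters          ='
    print(strncmpi(fline, '<<<<< Best ', 11))   # True
    print(fline.startswith('<<<<< Best '))      # True  -- same result here

    # The behavioral difference only appears when case varies:
    print(strncmpi('MV STATISTICS for x', 'MV Statistics for ', 18))  # True
    print('MV STATISTICS for x'.startswith('MV Statistics for '))     # False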
issm/trunk-jpl/src/m/solve/parseresultsfromdisk.py
r25817 → r26183

      import numpy as np
      from results import solution
+     from netCDF4 import Dataset
…
      # }}}

+
  def parseresultsfromdiskioserial(md, filename):  # {{{
      # Open file
      try:
          fid = open(filename, 'rb')
-     except IOError as e:
+     except IOError:
          raise IOError("parseresultsfromdisk error message: could not open {} for binary reading".format(filename))
      print(md.results)
      # Collect all results in a list
      allresults = []
…
          # Read next result
          result = ReadData(fid, md)
          if result is None:
              if allresults == []:
…
              else:
                  break
+         # print("now got {} for time {}".format(result['fieldname'], result['time']))
+         # print("with keys {}".format(result.keys()))
+         # print("==============================")

          allresults.append(result)
…
      return results
      # }}}
+
+
+ def parseresultsfromdisktonc(md, filin, filout):  # {{{
+     verbose = 0
+     # Open file
+     try:
+         fid = open(filin, 'rb')
+     except IOError:
+         raise IOError("parseresultsfromdisk error message: could not open {} for binary reading".format(filin))
+
+     NCData = Dataset(filout, 'w', format='NETCDF4')
+     UnlimDim = NCData.createDimension('Unlim', None)
+     DimDict = {len(UnlimDim): 'Inf'}
+     dimindex = 2
+     dimlist = [2, md.mesh.numberofelements, md.mesh.numberofvertices, np.shape(md.mesh.elements)[1]]
+     dimnames = ['DictDummy', 'EltNum', 'VertNum', 'VertPerElt']
+     if verbose > 0:
+         print('===Creating dimensions ===')
+     for i, newdim in enumerate(dimlist):
+         if newdim not in list(DimDict.keys()):
+             dimindex += 1
+             NewDim = NCData.createDimension(dimnames[i], newdim)
+             DimDict[len(NewDim)] = dimnames[i]
+
+     typelist = [bool, str, int, float, complex,
+                 collections.OrderedDict,
+                 np.int64, np.ndarray, np.float64]
+
+     # Collect all results in a list
+     allresults = []
+     while True:
+         # Read next result
+         result = ReadData(fid, md)
+         if result is None:
+             if allresults == []:
+                 raise Exception('no results found in binary file ' + filename)
+             else:
+                 break
+         print("now got {} for time {}".format(result['fieldname'], result['time']))
+         print("with keys {}".format(result.keys()))
+         print("==============================")
+
+         allresults.append(result)
+     fid.close()
+
+     # Now, process all results and find out how many steps we have
+     numresults = len(allresults)
+     allsteps = np.zeros((numresults, 1))
+     for i in range(numresults):
+         allsteps[i] = allresults[i]['step']
+     pos = np.where(allsteps != -9999)
+     allsteps = np.sort(np.unique(allsteps[pos]))
+
+     # Ok, now construct structure
+     results = solution()
+
+     for i in range(numresults):
+         result = allresults[i]
+         index = 0
+         if result['step'] != -9999:
+             index = np.where(result['step'] == allsteps)[0][0]
+             setattr(results[index], 'step', result['step'])
+         if result['time'] != -9999:
+             setattr(results[index], 'time', result['time'])
+         setattr(results[index], result['fieldname'], result['field'])
+     return results
+ # }}}

  def ReadData(fid, md):  # {{{
…
  # }}}

+
  def addfieldtorecord(a, descr):  #{{{
      if a.dtype.fields is None:
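The new parseresultsfromdisktonc() opens a NETCDF4 dataset and registers each mesh dimension once, remembering it by size in a dict so later variables can look their dimensions up by array shape. A minimal, self-contained sketch of that bookkeeping pattern (file name and sizes below are hypothetical stand-ins for md.mesh.numberofelements and friends):

    import numpy as np
    from netCDF4 import Dataset

    nc = Dataset('results_sketch.nc', 'w', format='NETCDF4')
    unlim = nc.createDimension('Unlim', None)   # unlimited (time) axis
    dimdict = {len(unlim): 'Inf'}               # len() of a fresh unlimited dim is 0

    # Create each dimension once, keyed by its size:
    for name, size in [('EltNum', 100), ('VertNum', 60), ('VertPerElt', 3)]:
        if size not in dimdict:
            dim = nc.createDimension(name, size)
            dimdict[len(dim)] = name

    # A variable can then resolve its dimensions from the data shape:
    field = np.zeros((60,))
    var = nc.createVariable('Thickness', 'f8', (dimdict[field.shape[0]],))
    var[:] = field
    nc.close()

Note that the committed function still references the undefined name filename in its "no results found" branch (a carry-over from parseresultsfromdiskioserial), so that error path would raise a NameError rather than the intended Exception.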