Changeset 23772
- Timestamp: 03/08/19 05:00:41 (6 years ago)
- Location: issm/trunk-jpl/src/m
- Files: 10 edited
issm/trunk-jpl/src/m/classes/issmsettings.py
r23758 → r23772: formatting only. Indentation and spacing are normalized to PEP8 (spaces around operators and after commas, consistent blank lines between methods) and a stray trailing semicolon is dropped from the solver_residue_threshold default; behavior is unchanged. The r23772 version of the class:

class issmsettings(object):
    """
    ISSMSETTINGS class definition

    Usage:
        issmsettings = issmsettings();
    """

    def __init__(self):  # {{{
        self.results_on_nodes = []
        self.io_gather = 0
        self.lowmem = 0
        self.output_frequency = 0
        self.coupling_frequency = 0
        self.recording_frequency = 0
        self.waitonlock = 0
        self.solver_residue_threshold = 0

        #set defaults
        self.setdefaultparameters()
    #}}}

    def __repr__(self):  # {{{
        string = "   general issmsettings parameters:"
        string = "%s\n%s" % (string, fielddisplay(self, "results_on_nodes", "list of outputs for which results are saved on all the nodes of each element; use 'all' for every output on nodes"))
        string = "%s\n%s" % (string, fielddisplay(self, "io_gather", "I/O gathering strategy for result outputs (default 1)"))
        string = "%s\n%s" % (string, fielddisplay(self, "lowmem", "is the memory limited? (0 or 1)"))
        string = "%s\n%s" % (string, fielddisplay(self, "output_frequency", "frequency at which results are saved in all solutions with multiple time_steps"))
        string = "%s\n%s" % (string, fielddisplay(self, "sb_coupling_frequency", "frequency at which the StressBalance solver is coupled (default 1)"))
        string = "%s\n%s" % (string, fielddisplay(self, "recording_frequency", "frequency at which the runs are recorded, allowing for a restart"))
        string = "%s\n%s" % (string, fielddisplay(self, "waitonlock", "maximum number of minutes to wait for batch results, or return 0"))
        string = "%s\n%s" % (string, fielddisplay(self, "solver_residue_threshold", "throw an error if the solver residue exceeds this value (NaN to deactivate)"))
        return string
    #}}}

    def setdefaultparameters(self):  # {{{
        #are we short on memory? (0 is faster but requires more memory)
        self.lowmem = 0

        #i/o:
        self.io_gather = 1

        #results frequency: by default every step
        self.output_frequency = 1

        #coupling frequency of the stress balance solver: by default every step
        self.sb_coupling_frequency = 1

        #checkpoint frequency: by default never
        self.recording_frequency = 0

        #this option can be activated to automatically load the results onto
        #the model after a parallel run, by waiting N minutes for the lock
        #file that is generated once the solution has converged; 0 to deactivate
        self.waitonlock = 2 ** 31 - 1

        #throw an error if the solver residue exceeds this value
        self.solver_residue_threshold = 1e-6

        return self
    #}}}

    def checkconsistency(self, md, solution, analyses):  # {{{
        md = checkfield(md, 'fieldname', 'settings.results_on_nodes', 'stringrow', 1)
        md = checkfield(md, 'fieldname', 'settings.io_gather', 'numel', [1], 'values', [0, 1])
        md = checkfield(md, 'fieldname', 'settings.lowmem', 'numel', [1], 'values', [0, 1])
        md = checkfield(md, 'fieldname', 'settings.output_frequency', 'numel', [1], '>=', 1)
        md = checkfield(md, 'fieldname', 'settings.sb_coupling_frequency', 'numel', [1], '>=', 1)
        md = checkfield(md, 'fieldname', 'settings.recording_frequency', 'numel', [1], '>=', 0)
        md = checkfield(md, 'fieldname', 'settings.waitonlock', 'numel', [1])
        md = checkfield(md, 'fieldname', 'settings.solver_residue_threshold', 'numel', [1], '>', 0)

        return md
    #}}}

    def marshall(self, prefix, md, fid):  # {{{
        WriteData(fid, prefix, 'data', self.results_on_nodes, 'name', 'md.settings.results_on_nodes', 'format', 'StringArray')
        WriteData(fid, prefix, 'object', self, 'class', 'settings', 'fieldname', 'io_gather', 'format', 'Boolean')
        WriteData(fid, prefix, 'object', self, 'class', 'settings', 'fieldname', 'lowmem', 'format', 'Boolean')
        WriteData(fid, prefix, 'object', self, 'class', 'settings', 'fieldname', 'output_frequency', 'format', 'Integer')
        WriteData(fid, prefix, 'object', self, 'class', 'settings', 'fieldname', 'sb_coupling_frequency', 'format', 'Integer')
        WriteData(fid, prefix, 'object', self, 'class', 'settings', 'fieldname', 'recording_frequency', 'format', 'Integer')

        if self.waitonlock > 0:
            WriteData(fid, prefix, 'name', 'md.settings.waitonlock', 'data', True, 'format', 'Boolean')
        else:
            WriteData(fid, prefix, 'name', 'md.settings.waitonlock', 'data', False, 'format', 'Boolean')
        WriteData(fid, prefix, 'object', self, 'fieldname', 'solver_residue_threshold', 'format', 'Double')
    #}}}
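For orientation, a minimal, hedged sketch of how these settings are driven from a user script. model() is the standard ISSM constructor that installs an issmsettings instance under md.settings; the values below are illustrative, not the defaults set above:

from model import model

md = model()
md.settings.output_frequency = 10      # keep every 10th time step in the results
md.settings.recording_frequency = 50   # write a restart checkpoint every 50 steps
md.settings.waitonlock = 0             # return immediately instead of polling the lock file
print(md.settings)                     # renders the fielddisplay summary via __repr__ above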
issm/trunk-jpl/src/m/contrib/defleurian/netCDF/export_netCDF.py
r23716 → r23772: the unused imports go away (stringtochar and the wildcard imports of mesh2d, mesh3dprisms and results), whitespace is normalized to PEP8, and dictionary handling is reworked so that it runs under Python 3: CreateVar now computes the shape of dict-typed values up front instead of probing them with dict.keys(), GetDim dispatches on the value type rather than on the type of the first shape element, and dictionary entries are written by enumerating the keys (dict.keys() is not subscriptable in Python 3). The duplicate str in the scalar-attribute test [str, str, bool] also becomes [str, bool]. Key hunks:

-from netCDF4 import Dataset, stringtochar
+from netCDF4 import Dataset
 import numpy as np
 import time
 import collections
-from mesh2d import *
-from mesh3dprisms import *
-from results import *
 from os import path, remove

 def CreateVar(NCData, var, field, Group, DimDict, *step_args):
     #grab type
     try:
         val_type = str(var.dtype)
     except AttributeError:
         val_type = type(var)
     #grab dimension
-    try:
-        val_shape = dict.keys(var)
-    except TypeError:
-        val_shape = np.shape(var)
+    if val_type in [collections.OrderedDict, dict]:
+        val_shape = len(var)
+        val_dim = 2
+    else:
+        val_shape = np.shape(var)
+        val_dim = np.shape(val_shape)[0]
     TypeDict = {float: 'f8', 'float64': 'f8', np.float64: 'f8',
                 int: 'i8', 'int64': 'i8', np.int64: 'i8',
                 str: str, dict: str}
-    val_dim = np.shape(val_shape)[0]

     #treating dictionaries as tables of strings
     elif val_type in [collections.OrderedDict, dict]:
         dimensions, DimDict = GetDim(NCData, val_shape, val_type, DimDict, val_dim)
         ncvar = Group.createVariable(str(field), str, dimensions, zlib=True)
-        for elt in range(0, val_dim):
-            ncvar[elt, 0] = dict.keys(var)[elt]
-            ncvar[elt, 1] = str(dict.values(var)[elt])  #converting to str to avoid potential problems
+        for elt, key in enumerate(dict.keys(var)):
+            ncvar[elt, 0] = key
+            ncvar[elt, 1] = str(var[key])  #converting to str to avoid potential problems

-def GetDim(NCData, var, shape, DimDict, i):
+def GetDim(NCData, val_shape, val_type, DimDict, val_dim):
     output = []
-    for dim in range(0, i):  #loop on the dimensions
-        if type(shape[0]) == int:
-            #numeric shape: reuse or create one dimension per axis
-            ...
-        elif type(shape[0]) == str:  #dealing with a dictionary
-            ...
-            break
+    if val_type in [collections.OrderedDict, dict]:  #dealing with a dictionary
+        try:
+            output = [str(DimDict[val_shape])]  #reuse the matching dimension if it exists
+            output = output + [DimDict[2]]      #the second dimension is 2: (key, value)
+        except KeyError:
+            index = len(DimDict) + 1            #the dimension does not exist: increment the name
+            NewDim = NCData.createDimension('DimNum' + str(index), val_shape)
+            DimDict[len(NewDim)] = 'DimNum' + str(index)
+            output = [str(DimDict[val_shape])] + [DimDict[2]]
+    else:
+        for dim in range(0, val_dim):  #loop on the dimensions
+            try:
+                output = output + [str(DimDict[val_shape[dim]])]  #reuse the dimension if it already exists
+            except KeyError:  #if not, create it
+                if val_shape[dim] > 0:
+                    index = len(DimDict) + 1
+                    NewDim = NCData.createDimension('DimNum' + str(index), val_shape[dim])
+                    DimDict[len(NewDim)] = 'DimNum' + str(index)
+                    output = output + [str(DimDict[val_shape[dim]])]
     return tuple(output), DimDict

The export_netCDF driver itself is unchanged apart from reindentation, comment normalization and the updated GetDim call sites.
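A hedged usage sketch of the exporter (the file name is illustrative): it walks md.__dict__ as shown above, creating one netCDF group per model class and one variable, attribute or subgroup per field:

from model import model
from export_netCDF import export_netCDF

md = model()
# ... mesh the domain and run the desired solutions ...
export_netCDF(md, 'MyRun.nc')  # prompts interactively if MyRun.nc already exists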
issm/trunk-jpl/src/m/contrib/defleurian/netCDF/read_netCDF.py
r23716 → r23772: the unused time and collections imports are dropped and the body is reflowed; note that path.exists() is still called, so from os import path has to stay (it is kept below with a comment). The r23772 version:

from netCDF4 import Dataset
from os import path  #still needed: path.exists() is called below


def netCDFRead(filename):

    def walktree(data):
        keys = list(data.groups.keys())
        yield keys
        for key in keys:
            for children in walktree(data.groups[str(key)]):
                yield children

    if path.exists(filename):
        print('Opening {} for reading'.format(filename))
        NCData = Dataset(filename, 'r')
        class_dict = {}

        for children in walktree(NCData):
            for child in children:
                class_dict[str(child)] = str(getattr(NCData.groups[str(child)], 'classtype') + '()')

        print(class_dict)
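Usage is a one-liner; as written, the function only prints the class map rather than returning it (the file name is illustrative, and the printed contents depend on the file):

from read_netCDF import netCDFRead

netCDFRead('MyRun.nc')  # prints e.g. {'mesh': 'mesh2d()', 'results': 'results()', ...}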
issm/trunk-jpl/src/m/contrib/defleurian/paraview/enveloppeVTK.py
r23716 → r23772: the interactive prompt no longer goes through eval(). Under Python 3, input() already returns a string, and eval() tried to execute the typed answer as an expression, so answering delete without quotes raised a NameError:

     if os.path.exists(filename):
         print('File {} already exists'.format(filename))
-        newname = eval(input('Give a new name or "delete" to replace: '))
+        newname = input('Give a new name or "delete" to replace: ')
         if newname == 'delete':
             filelist = glob.glob(filename + '/*')
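A minimal illustration (not ISSM code) of why the old line failed under Python 3:

answer = 'delete'          # what input() returns when the user types: delete
assert answer == 'delete'  # the new plain-string comparison matches
# the old eval(input(...)) evaluated that text as an expression, so typing
# delete raised NameError: name 'delete' is not defined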
issm/trunk-jpl/src/m/io/loadmodel.py
r23719 → r23772: the Python 3 branch now imports whichdb from dbm rather than dbm.ndbm, the existence check is restructured around whichdb(), and the body is reindented (the loadvars import is among the unchanged lines the diff does not show). The r23772 version:

#hack to keep python 2 compatibility
try:
    #py3 import
    from dbm import whichdb
except ImportError:
    #py2 import
    from whichdb import whichdb
from netCDF4 import Dataset


def loadmodel(path):
    """
    LOADMODEL - load a model using the built-in load module

    Check that the model prototype has not changed; if so, adapt to the new prototype.

    Usage:
        md = loadmodel(path)
    """

    #check existence of the database (independent of file extension!)
    if whichdb(path):
        #do nothing
        pass
    else:
        try:
            NCFile = Dataset(path, mode='r')
            NCFile.close()
        except RuntimeError:
            raise IOError("loadmodel error message: file '%s' does not exist" % path)

    #recover the model on file and name it md
    struc = loadvars(path)
    name = [key for key in list(struc.keys())]
    if len(name) > 1:
        raise IOError("loadmodel error message: file '%s' contains several variables. Only one model should be present." % path)

    md = struc[name[0]]
    return md
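A hedged sketch of the two load paths (file names illustrative): whichdb() recognizes a python shelve, and anything else must open as a netCDF Dataset:

from loadmodel import loadmodel

md = loadmodel('MyRun.shelve')  # shelve/dbm archive: whichdb() returns a truthy string
md = loadmodel('MyRun.nc')      # netCDF archive such as one written by export_netCDF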
issm/trunk-jpl/src/m/io/loadvars.py
r23719 → r23772: housekeeping plus Python 3 fixes. The py3 branch imports whichdb from dbm instead of dbm.ndbm, dead imports disappear (os.path, chartostring, os.path's path), the "compatipility" typo in the comment is corrected, prints move to str.format, and an unused per-variable dtype probe is deleted. In the netCDF reader itself: list indices taken from indexlist are cast back to int (under Python 3, / is true division, so the computed indices are floats), scalar variables are unwrapped to native Python values with .item(), numeric arrays are read through the masked array's .data, and netCDFread() now announces the file it opens and skips empty groups instead of crashing on them. Key hunks:

 #hack to keep python 2 compatibility
 try:
     #py3 import
-    from dbm.ndbm import whichdb
+    from dbm import whichdb
 except ImportError:
     #py2 import
     from whichdb import whichdb

-                            t = indexlist[i]
+                            t = int(indexlist[i])  #indexlist holds floats under Python 3 true division

-                            else:
-                                Tree.__dict__[str(var)] = varval[0]
+                            else:
+                                Tree.__dict__[str(var)] = varval[0].item()  #a native Python scalar, not a numpy one

-                                else:
-                                    Tree.__dict__[str(var)] = varval[:]
+                                else:
+                                    Tree.__dict__[str(var)] = varval[:].data  #unwrap the netCDF4 masked array

(the 2-D and 3-D branches gain the same unwrapping: varval[:, :].data and varval[:, :, :].data)

 def netCDFread(filename):
+    print('Opening {} for reading'.format(filename))
     NCData = Dataset(filename, 'r')
     ...
     else:
         classe = str(group)
-        class_dict[classe] = [str(getattr(NCData.groups[group], 'classtype')), ]
-        if class_dict[classe][0] not in ['dict', 'list', 'cell']:
-            class_dict[classe].append(__import__(class_dict[classe][0]))
-        class_tree[classe] = [group, ]
+        try:
+            class_dict[classe] = [str(getattr(NCData.groups[group], 'classtype')), ]
+            if class_dict[classe][0] not in ['dict', 'list', 'cell']:
+                class_dict[classe].append(__import__(class_dict[classe][0]))
+            class_tree[classe] = [group, ]
+        except AttributeError:
+            print('group {} is empty'.format(group))
     NCData.close()
     return class_dict, class_tree

The docstring ("LOADVARS - function to load variables to a file") should read "from a file"; the loading semantics are otherwise unchanged: a shelve archive is read through shelve.open, anything else through netCDF4.
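The calling conventions, spelled out; shelve.dat and the variable names are the docstring's own examples, MyRun.nc is illustrative:

from loadvars import loadvars

a = loadvars('shelve.dat', 'a')            # one named variable
a, b = loadvars('shelve.dat', ['a', 'b'])  # several, as a list
nvdict = loadvars('MyRun.nc')              # whole file; a netCDF archive comes back as {'md': <model>}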
issm/trunk-jpl/src/m/os/issmscpin.py
r23722 → r23772: the scp invocations move from %-interpolation to str.format; the commands themselves are unchanged:

     #string to copy multiple files using scp:
     string = "'{" + ','.join([str(x) for x in packages]) + "}'"
     if port:
-        subprocess.call('scp -P %d %s@localhost:%s %s/.' % (port, login, os.path.join(path, string), os.getcwd()), shell=True)
+        subprocess.call('scp -P {} {}@localhost:{} {}/.'.format(port, login, os.path.join(path, string), os.getcwd()), shell=True)
     else:
-        subprocess.call('scp -T %s@%s:%s %s/.' % (login, host, os.path.join(path, string), os.getcwd()), shell=True)
+        subprocess.call('scp -T {}@{}:{} {}/.'.format(login, host, os.path.join(path, string), os.getcwd()), shell=True)
     #check that scp worked
     for package in packages:
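For illustration, the brace-expansion string both branches build lets a single scp call fetch several files; host, login and paths here are hypothetical:

import os

packages = ['run.outbin', 'run.outlog']
string = "'{" + ','.join([str(x) for x in packages]) + "}'"
cmd = 'scp -T {}@{}:{} {}/.'.format('user', 'remote.host', os.path.join('/work/run', string), os.getcwd())
print(cmd)  # scp -T user@remote.host:/work/run/'{run.outbin,run.outlog}' <cwd>/.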
issm/trunk-jpl/src/m/plot/plot_overlay.py
r23716 → r23772: the normalized overlay image is inverted after scaling:

     # normalize array
     arr = arr / np.float(np.max(arr.ravel()))
+    arr = 1. - arr  # somehow the values got flipped

     if options.getfieldvalue('overlayhist', 0) == 1:
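A tiny numeric check of the normalize-then-invert step (plain float() stands in for the source's np.float, an alias that newer NumPy removed):

import numpy as np

arr = np.array([0., 50., 100.])
arr = arr / float(np.max(arr.ravel()))  # -> [0. , 0.5, 1. ]
arr = 1. - arr                          # -> [1. , 0.5, 0. ], dark and light flipped back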
issm/trunk-jpl/src/m/solve/WriteData.py
r23768 r23772 1 from struct import pack, error 1 2 import numpy as np 2 from struct import pack,unpack3 3 import pairoptions 4 4 5 def WriteData(fid,prefix,*args): 6 """ 7 WRITEDATA - write model field in binary file 8 9 Usage: 10 WriteData(fid,varargin) 11 """ 12 13 #process options 14 options=pairoptions.pairoptions(*args) 15 16 #Get data properties 17 if options.exist('object'): 18 #This is an object field, construct enum and data 19 obj = options.getfieldvalue('object') 20 fieldname = options.getfieldvalue('fieldname') 21 classname = options.getfieldvalue('class',str(type(obj)).rsplit('.')[-1].split("'")[0]) 22 name = options.getfieldvalue('name',prefix+'.'+fieldname); 23 if options.exist('data'): 24 data = options.getfieldvalue('data') 25 else: 26 data = getattr(obj,fieldname) 27 else: 28 #No processing required 29 data = options.getfieldvalue('data') 30 name = options.getfieldvalue('name') 31 32 datatype = options.getfieldvalue('format') 33 mattype = options.getfieldvalue('mattype',0) #only required for matrices 34 timeserieslength = options.getfieldvalue('timeserieslength',-1) 35 36 #Process sparse matrices 37 # if issparse(data), 38 # data=full(data); 39 # end 40 41 #Scale data if necesarry 42 if options.exist('scale'): 43 data=np.array(data) 44 scale = options.getfieldvalue('scale') 45 if np.size(data) > 1 and np.ndim(data) > 1 and np.size(data,0)==timeserieslength: 46 data[0:-1,:] = scale*data[0:-1,:] 47 else: 48 data = scale*data 49 if np.size(data) > 1 and np.size(data,0)==timeserieslength: 50 yts = options.getfieldvalue('yts') 51 if np.ndim(data) > 1: 52 data[-1,:] = yts*data[-1,:] 53 else: 54 data[-1] = yts*data[-1] 55 56 #Step 1: write the enum to identify this record uniquely 57 fid.write(pack('i',len(name))) 58 fid.write(pack('{}s'.format(len(name)),name.encode())) 59 60 61 #Step 2: write the data itself. 62 if datatype=='Boolean': # {{{ 63 # if len(data) !=1: 64 # raise ValueError('fi eld %s cannot be marshalled as it has more than one element!' % name[0]) 65 66 #first write length of record 67 fid.write(pack('q',4+4)) #1 bool (disguised as an int)+code 68 69 #write data code: 70 fid.write(pack('i',FormatToCode(datatype))) 71 72 #now write integer 73 fid.write(pack('i',int(data))) #send an int, not easy to send a bool 74 # }}} 75 76 elif datatype=='Integer': # {{{ 77 # if len(data) !=1: 78 # raise ValueError('field %s cannot be marshalled as it has more than one element!' % name[0]) 79 80 #first write length of record 81 fid.write(pack('q',4+4)) #1 integer + code 82 83 #write data code: 84 fid.write(pack('i',FormatToCode(datatype))) 85 86 #now write integer 87 fid.write(pack('i',int(data))) #force an int, 88 # }}} 89 90 elif datatype=='Double': # {{{ 91 # if len(data) !=1: 92 # raise ValueError('field %s cannot be marshalled as it has more than one element!' 
% name[0]) 93 94 #first write length of record 95 fid.write(pack('q',8+4)) #1 double+code 96 97 #write data code: 98 fid.write(pack('i',FormatToCode(datatype))) 99 100 #now write double 101 fid.write(pack('d',data)) 102 # }}} 103 104 elif datatype=='String': # {{{ 105 #first write length of record 106 fid.write(pack('q',len(data)+4+4)) #string + string size + code 107 108 #write data code: 109 fid.write(pack('i',FormatToCode(datatype))) 110 111 #now write string 112 fid.write(pack('i',len(data))) 113 fid.write(pack('{}s'.format(len(data)),data.encode())) 114 # }}} 115 116 elif datatype in ['IntMat','BooleanMat']: # {{{ 117 118 if isinstance(data,(int,bool)): 119 data=np.array([data]) 120 elif isinstance(data,(list,tuple)): 121 data=np.array(data).reshape(-1,) 122 if np.ndim(data) == 1: 123 if np.size(data): 124 data=data.reshape(np.size(data),) 125 else: 126 data=data.reshape(0,0) 127 128 #Get size 129 s=data.shape 130 #if matrix = NaN, then do not write anything 131 if np.ndim(data)==2 and np.product(s)==1 and np.all(np.isnan(data)): 132 s=(0,0) 133 134 #first write length of record 135 fid.write(pack('q',4+4+8*np.product(s)+4+4)) #2 integers (32 bits) + the double matrix + code + matrix type 136 137 #write data code and matrix type: 138 fid.write(pack('i',FormatToCode(datatype))) 139 fid.write(pack('i',mattype)) 140 141 #now write matrix 142 if np.ndim(data) == 1: 143 fid.write(pack('i',s[0])) 144 fid.write(pack('i',1)) 145 for i in range(s[0]): 146 fid.write(pack('d',float(data[i]))) #get to the "c" convention, hence the transpose 147 else: 148 fid.write(pack('i',s[0])) 149 fid.write(pack('i',s[1])) 150 for i in range(s[0]): 151 for j in range(s[1]): 152 fid.write(pack('d',float(data[i][j]))) #get to the "c" convention, hence the transpose 153 # }}} 154 155 elif datatype=='DoubleMat': # {{{ 156 157 if isinstance(data,(bool,int,float)): 158 data=np.array([data]) 159 elif isinstance(data,(list,tuple)): 160 data=np.array(data).reshape(-1,) 161 if np.ndim(data) == 1: 162 if np.size(data): 163 data=data.reshape(np.size(data),) 164 else: 165 data=data.reshape(0,0) 166 167 #Get size 168 s=data.shape 169 #if matrix = NaN, then do not write anything 170 if np.ndim(data)==1 and np.product(s)==1 and np.all(np.isnan(data)): 171 s=(0,0) 172 173 #first write length of record 174 recordlength=4+4+8*np.product(s)+4+4; #2 integers (32 bits) + the double matrix + code + matrix type 175 if recordlength > 4**31 : 176 raise ValueError('field {} cannot be marshalled because it is larger than 4^31 bytes!'.format(enum)) 177 178 fid.write(pack('q',recordlength)) #2 integers (32 bits) + the double matrix + code + matrix type 179 180 #write data code and matrix type: 181 fid.write(pack('i',FormatToCode(datatype))) 182 fid.write(pack('i',mattype)) 183 184 #now write matrix 185 if np.ndim(data) == 1: 186 fid.write(pack('i',s[0])) 187 fid.write(pack('i',1)) 188 for i in range(s[0]): 189 fid.write(pack('d',float(data[i]))) #get to the "c" convention, hence the transpose 190 else: 191 fid.write(pack('i',s[0])) 192 fid.write(pack('i',s[1])) 193 for i in range(s[0]): 194 for j in range(s[1]): 195 fid.write(pack('d',float(data[i][j]))) #get to the "c" convention, hence the transpose 196 # }}} 197 198 elif datatype=='CompressedMat': # {{{ 199 200 if isinstance(data,(bool,int,float)): 201 data=np.array([data]) 202 elif isinstance(data,(list,tuple)): 203 data=np.array(data).reshape(-1,) 204 if np.ndim(data) == 1: 205 if np.size(data): 206 data=data.reshape(np.size(data),) 207 else: 208 data=data.reshape(0,0) 209 210 #Get 
size 211 s=data.shape 212 if np.ndim(data) == 1: 213 n2=1 214 else: 215 n2=s[1] 216 217 #if matrix = NaN, then do not write anything 218 if np.ndim(data)==1 and np.product(s)==1 and np.all(np.isnan(data)): 219 s=(0,0) 220 n2=0 221 222 #first write length of record 223 recordlength=4+4+8+8+1*(s[0]-1)*n2+8*n2+4+4 #2 integers (32 bits) + the matrix + code + matrix type 224 if recordlength > 4**31 : 225 raise ValueError('field %s cannot be marshalled because it is larger than 4^31 bytes!' % enum) 226 227 fid.write(pack('q',recordlength)) #2 integers (32 bits) + the matrix + code + matrix type 228 229 #write data code and matrix type: 230 fid.write(pack('i',FormatToCode(datatype))) 231 fid.write(pack('i',mattype)) 232 233 #Write offset and range 234 A = data[0:s[0]-1] 235 offsetA = A.min() 236 rangeA = A.max() - offsetA 237 238 if rangeA == 0: 239 A = A*0 240 else: 241 A = (A-offsetA)/rangeA*255. 242 243 #now write matrix 244 if np.ndim(data) == 1: 245 fid.write(pack('i',s[0])) 246 fid.write(pack('i',1)) 247 fid.write(pack('d',float(offsetA))) 248 fid.write(pack('d',float(rangeA))) 249 for i in range(s[0]-1): 250 fid.write(pack('B',int(A[i]))) 251 252 fid.write(pack('d',float(data[s[0]-1]))) #get to the "c" convention, hence the transpose 253 254 elif np.product(s) > 0: 255 fid.write(pack('i',s[0])) 256 fid.write(pack('i',s[1])) 257 fid.write(pack('d',float(offsetA))) 258 fid.write(pack('d',float(rangeA))) 259 for i in range(s[0]-1): 260 for j in range(s[1]): 261 fid.write(pack('B',int(A[i][j]))) #get to the "c" convention, hence the transpose 262 263 for j in range(s[1]): 264 fid.write(pack('d',float(data[s[0]-1][j]))) 265 266 # }}} 267 268 elif datatype=='MatArray': # {{{ 269 270 #first get length of record 271 recordlength=4+4 #number of records + code 272 for matrix in data: 273 if isinstance(matrix,(bool,int,float)): 274 matrix=np.array([matrix]) 275 elif isinstance(matrix,(list,tuple)): 276 matrix=np.array(matrix).reshape(-1,) 277 if np.ndim(matrix) == 1: 278 if np.size(matrix): 279 matrix=matrix.reshape(np.size(matrix),) 280 else: 281 matrix=matrix.reshape(0,0) 282 283 s=matrix.shape 284 recordlength+=4*2+np.product(s)*8 #row and col of matrix + matrix of doubles 285 286 #write length of record 287 fid.write(pack('q',recordlength)) 288 289 #write data code: 290 fid.write(pack('i',FormatToCode(datatype))) 291 292 #write data, first number of records 293 fid.write(pack('i',len(data))) 294 295 for matrix in data: 296 if isinstance(matrix,(bool,int,float)): 297 matrix=np.array([matrix]) 298 elif isinstance(matrix,(list,tuple)): 299 matrix=np.array(matrix).reshape(-1,) 300 if np.ndim(matrix) == 1: 301 matrix=matrix.reshape(np.size(matrix),) 302 303 s=matrix.shape 304 305 if np.ndim(matrix) == 1: 306 fid.write(pack('i',s[0])) 307 fid.write(pack('i',1)) 308 for i in range(s[0]): 309 fid.write(pack('d',float(matrix[i]))) #get to the "c" convention, hence the transpose 310 else: 311 fid.write(pack('i',s[0])) 312 fid.write(pack('i',s[1])) 313 for i in range(s[0]): 314 for j in range(s[1]): 315 fid.write(pack('d',float(matrix[i][j]))) 316 # }}} 317 318 elif datatype=='StringArray': # {{{ 319 320 #first get length of record 321 recordlength=4+4 #for length of array + code 322 for string in data: 323 recordlength+=4+len(string) #for each string 324 325 #write length of record 326 fid.write(pack('q',recordlength)) 327 328 #write data code: 329 fid.write(pack('i',FormatToCode(datatype))) 330 331 #now write length of string array 332 fid.write(pack('i',len(data))) 333 334 #now write the strings 335 for 
string in data: 336 fid.write(pack('i',len(string))) 337 fid.write(pack('{}s'.format(len(string)),string.encode())) 338 # }}} 339 340 else: # {{{ 341 raise TypeError('WriteData error message: data type: {} not supported yet! ({})'.format(datatype,enum)) 342 # }}} 343 344 def FormatToCode(datatype): # {{{ 345 """ 346 This routine takes the datatype string, and hardcodes it into an integer, which 347 is passed along the record, in order to identify the nature of the dataset being 348 sent. 349 """ 350 351 if datatype=='Boolean': 352 code=1 353 elif datatype=='Integer': 354 code=2 355 elif datatype=='Double': 356 code=3 357 elif datatype=='String': 358 code=4 359 elif datatype=='BooleanMat': 360 code=5 361 elif datatype=='IntMat': 362 code=6 363 elif datatype=='DoubleMat': 364 code=7 365 elif datatype=='MatArray': 366 code=8 367 elif datatype=='StringArray': 368 code=9 369 elif datatype=='CompressedMat': 370 code=10 371 else: 372 raise InputError('FormatToCode error message: data type not supported yet!') 373 374 return code 5 6 def WriteData(fid, prefix, *args): 7 """ 8 WRITEDATA - write model field in binary file 9 10 Usage: 11 WriteData(fid, varargin) 12 """ 13 14 #process options 15 options = pairoptions.pairoptions(*args) 16 17 #Get data properties 18 if options.exist('object'): 19 #This is an object field, construct enum and data 20 obj = options.getfieldvalue('object') 21 fieldname = options.getfieldvalue('fieldname') 22 name = options.getfieldvalue('name', prefix + '.' + fieldname) 23 if options.exist('data'): 24 data = options.getfieldvalue('data') 25 else: 26 data = getattr(obj, fieldname) 27 else: 28 #No processing required 29 data = options.getfieldvalue('data') 30 name = options.getfieldvalue('name') 31 32 datatype = options.getfieldvalue('format') 33 mattype = options.getfieldvalue('mattype', 0) #only required for matrices 34 timeserieslength = options.getfieldvalue('timeserieslength', -1) 35 36 #Process sparse matrices 37 # if issparse(data), 38 # data = full(data); 39 # end 40 41 #Scale data if necesarry 42 if options.exist('scale'): 43 data = np.array(data) 44 scale = options.getfieldvalue('scale') 45 if np.size(data) > 1 and np.ndim(data) > 1 and np.size(data, 0) == timeserieslength: 46 data[0:-1, :] = scale * data[0:-1, :] 47 else: 48 data = scale * data 49 if np.size(data) > 1 and np.size(data, 0) == timeserieslength: 50 yts = options.getfieldvalue('yts') 51 if np.ndim(data) > 1: 52 data[-1, :] = yts * data[-1, :] 53 else: 54 data[-1] = yts * data[-1] 55 56 #Step 1: write the enum to identify this record uniquely 57 fid.write(pack('i', len(name))) 58 fid.write(pack('{}s'.format(len(name)), name.encode())) 59 60 #Step 2: write the data itself. 
61     if datatype == 'Boolean': # {{{
62         #first write length of record
63         fid.write(pack('q', 4 + 4)) #1 bool (disguised as an int) + code
64         #write data code:
65         fid.write(pack('i', FormatToCode(datatype)))
66 
67         #now write bool as an integer
68         try:
69             fid.write(pack('i', int(data))) #send an int, not easy to send a bool
70         except error as Err:
71             raise ValueError('field {} cannot be marshaled, {}'.format(name, Err))
72         # }}}
73 
74     elif datatype == 'Integer': # {{{
75         #first write length of record
76         fid.write(pack('q', 4 + 4)) #1 integer + code
77 
78         #write data code:
79         fid.write(pack('i', FormatToCode(datatype)))
80 
81         #now write integer
82         try:
83             fid.write(pack('i', int(data))) #force an int,
84         except error as Err:
85             raise ValueError('field {} cannot be marshaled, {}'.format(name, Err))
86         # }}}
87 
88     elif datatype == 'Double': # {{{
89         #first write length of record
90         fid.write(pack('q', 8 + 4)) #1 double + code
91 
92         #write data code:
93         fid.write(pack('i', FormatToCode(datatype)))
94 
95         #now write double
96         try:
97             fid.write(pack('d', data))
98         except error as Err:
99             raise ValueError('field {} cannot be marshaled, {}'.format(name, Err))
100         # }}}
101 
102     elif datatype == 'String': # {{{
103         #first write length of record
104         fid.write(pack('q', len(data) + 4 + 4)) #string + string size + code
105 
106         #write data code:
107         fid.write(pack('i', FormatToCode(datatype)))
108 
109         #now write string
110         fid.write(pack('i', len(data)))
111         fid.write(pack('{}s'.format(len(data)), data.encode()))
112         # }}}
113 
114     elif datatype in ['IntMat', 'BooleanMat']: # {{{
115         if isinstance(data, (int, bool)):
116             data = np.array([data])
117         elif isinstance(data, (list, tuple)):
118             data = np.array(data).reshape(-1,)
119         if np.ndim(data) == 1:
120             if np.size(data):
121                 data = data.reshape(np.size(data),)
122             else:
123                 data = data.reshape(0, 0)
124 
125         #Get size
126         s = data.shape
127         #if matrix = NaN, then do not write anything
128         if np.ndim(data) == 2 and np.product(s) == 1 and np.all(np.isnan(data)):
129             s = (0, 0)
130 
131         #first write length of record
132         recordlength = 4 + 4 + 8 * np.product(s) + 4 + 4 #2 integers (32 bits) + the double matrix + code + matrix type
133         fid.write(pack('q', recordlength))
134 
135         #write data code and matrix type:
136         fid.write(pack('i', FormatToCode(datatype)))
137         fid.write(pack('i', mattype))
138 
139         #now write matrix
140         fid.write(pack('i', s[0]))
141         try:
142             fid.write(pack('i', s[1]))
143         except IndexError:
144             fid.write(pack('i', 1))
145         for i in range(s[0]):
146             if np.ndim(data) == 1:
147                 fid.write(pack('d', float(data[i]))) #get to the "c" convention, hence the transpose
148             else:
149                 for j in range(s[1]):
150                     fid.write(pack('d', float(data[i][j]))) #get to the "c" convention, hence the transpose
151         # }}}
152 
153     elif datatype == 'DoubleMat': # {{{
154 
155         if isinstance(data, (bool, int, float)):
156             data = np.array([data])
157         elif isinstance(data, (list, tuple)):
158             data = np.array(data).reshape(-1,)
159         if np.ndim(data) == 1:
160             if np.size(data):
161                 data = data.reshape(np.size(data),)
162             else:
163                 data = data.reshape(0, 0)
164 
165         #Get size
166         s = data.shape
167         #if matrix = NaN, then do not write anything
168         if np.ndim(data) == 1 and np.product(s) == 1 and np.all(np.isnan(data)):
169             s = (0, 0)
170 
171         #first write length of record
172         recordlength = 4 + 4 + 8 * np.product(s) + 4 + 4 #2 integers (32 bits) + the double matrix + code + matrix type
173 
174         try:
175             fid.write(pack('q', recordlength))
176         except error as Err:
177             raise ValueError('Field {} cannot be marshaled, {}, with "number" the length of the record.'.format(name, Err))
178 
179         #write data code and matrix type:
180         fid.write(pack('i', FormatToCode(datatype)))
181         fid.write(pack('i', mattype))
182         #now write matrix
183         fid.write(pack('i', s[0]))
184         try:
185             fid.write(pack('i', s[1]))
186         except IndexError:
187             fid.write(pack('i', 1))
188         for i in range(s[0]):
189             if np.ndim(data) == 1:
190                 fid.write(pack('d', float(data[i]))) #get to the "c" convention, hence the transpose
191             else:
192                 for j in range(s[1]):
193                     fid.write(pack('d', float(data[i][j]))) #get to the "c" convention, hence the transpose
194         # }}}
195 
196     elif datatype == 'CompressedMat': # {{{
197         if isinstance(data, (bool, int, float)):
198             data = np.array([data])
199         elif isinstance(data, (list, tuple)):
200             data = np.array(data).reshape(-1,)
201         if np.ndim(data) == 1:
202             if np.size(data):
203                 data = data.reshape(np.size(data),)
204             else:
205                 data = data.reshape(0, 0)
206 
207         #Get size
208         s = data.shape
209         if np.ndim(data) == 1:
210             n2 = 1
211         else:
212             n2 = s[1]
213 
214         #if matrix = NaN, then do not write anything
215         if np.ndim(data) == 1 and np.product(s) == 1 and np.all(np.isnan(data)):
216             s = (0, 0)
217             n2 = 0
218 
219         #first write length of record
220         recordlength = 4 + 4 + 8 + 8 + 1 * (s[0] - 1) * n2 + 8 * n2 + 4 + 4 #2 integers (32 bits) + the matrix + code + matrix type
221         try:
222             fid.write(pack('q', recordlength))
223         except error as Err:
224             raise ValueError('Field {} cannot be marshaled, {}, with "number" the length of the record.'.format(name, Err))
225 
226         #write data code and matrix type:
227         fid.write(pack('i', FormatToCode(datatype)))
228         fid.write(pack('i', mattype))
229 
230         #Write offset and range
231         A = data[0:s[0] - 1]
232         offsetA = A.min()
233         rangeA = A.max() - offsetA
234 
235         if rangeA == 0:
236             A = A * 0
237         else:
238             A = (A - offsetA) / rangeA * 255.
239 
240         #now write matrix
241         fid.write(pack('i', s[0]))
242         try:
243             fid.write(pack('i', s[1]))
244         except IndexError:
245             fid.write(pack('i', 1))
246         fid.write(pack('d', float(offsetA)))
247         fid.write(pack('d', float(rangeA)))
248         if np.ndim(data) == 1:
249             for i in range(s[0] - 1):
250                 fid.write(pack('B', int(A[i])))
251             fid.write(pack('d', float(data[s[0] - 1]))) #get to the "c" convention, hence the transpose
252 
253         elif np.product(s) > 0:
254             for i in range(s[0] - 1):
255                 for j in range(s[1]):
256                     fid.write(pack('B', int(A[i][j]))) #get to the "c" convention, hence the transpose
257 
258             for j in range(s[1]):
259                 fid.write(pack('d', float(data[s[0] - 1][j])))
260 
261         # }}}
262 
263     elif datatype == 'MatArray': # {{{
264 
265         #first get length of record
266         recordlength = 4 + 4 #number of records + code
267         for matrix in data:
268             if isinstance(matrix, (bool, int, float)):
269                 matrix = np.array([matrix])
270             elif isinstance(matrix, (list, tuple)):
271                 matrix = np.array(matrix).reshape(-1,)
272             if np.ndim(matrix) == 1:
273                 if np.size(matrix):
274                     matrix = matrix.reshape(np.size(matrix),)
275                 else:
276                     matrix = matrix.reshape(0, 0)
277 
278             s = matrix.shape
279             recordlength += 4 * 2 + np.product(s) * 8 #row and col of matrix + matrix of doubles
280 
281         #write length of record
282         fid.write(pack('q', recordlength))
283 
284         #write data code:
285         fid.write(pack('i', FormatToCode(datatype)))
286 
287         #write data, first number of records
288         fid.write(pack('i', len(data)))
289 
290         for matrix in data:
291             if isinstance(matrix, (bool, int, float)):
292                 matrix = np.array([matrix])
293             elif isinstance(matrix, (list, tuple)):
294                 matrix = np.array(matrix).reshape(-1,)
295             if np.ndim(matrix) == 1:
296                 matrix = matrix.reshape(np.size(matrix),)
297 
298             s = matrix.shape
299 
300             fid.write(pack('i', s[0]))
301             try:
302                 fid.write(pack('i', s[1]))
303             except IndexError:
304                 fid.write(pack('i', 1))
305             for i in range(s[0]):
306                 if np.ndim(matrix) == 1:
307                     fid.write(pack('d', float(matrix[i]))) #get to the "c" convention, hence the transpose
308                 else:
309                     for j in range(s[1]):
310                         fid.write(pack('d', float(matrix[i][j])))
311         # }}}
312 
313     elif datatype == 'StringArray': # {{{
314         #first get length of record
315         recordlength = 4 + 4 #for length of array + code
316         for string in data:
317             recordlength += 4 + len(string) #for each string
318 
319         #write length of record
320         fid.write(pack('q', recordlength))
321 
322         #write data code:
323         fid.write(pack('i', FormatToCode(datatype)))
324 
325         #now write length of string array
326         fid.write(pack('i', len(data)))
327 
328         #now write the strings
329         for string in data:
330             fid.write(pack('i', len(string)))
331             fid.write(pack('{}s'.format(len(string)), string.encode()))
332         # }}}
333 
334     else: # {{{
335         raise TypeError('WriteData error message: data type: {} not supported yet! ({})'.format(datatype, name))
336         # }}}
337 
338 
339 def FormatToCode(datatype): # {{{
340     """
341     This routine maps the datatype string to a hard-coded integer code, which
342     is written into the record in order to identify the nature of the dataset
343     being sent.
344     """
345     if datatype == 'Boolean':
346         code = 1
347     elif datatype == 'Integer':
348         code = 2
349     elif datatype == 'Double':
350         code = 3
351     elif datatype == 'String':
352         code = 4
353     elif datatype == 'BooleanMat':
354         code = 5
355     elif datatype == 'IntMat':
356         code = 6
357     elif datatype == 'DoubleMat':
358         code = 7
359     elif datatype == 'MatArray':
360         code = 8
361     elif datatype == 'StringArray':
362         code = 9
363     elif datatype == 'CompressedMat':
364         code = 10
365     else:
366         raise IOError('FormatToCode error message: data type not supported yet!')
367     return code
375 368 # }}}
-
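For readers skimming the new WriteData above: options arrive as flat name/value pairs (parsed by pairoptions), and every call emits one self-describing record — the name length and name, the record length, a FormatToCode integer, then the payload. A minimal sketch of a caller follows; the file name, field names and values are purely illustrative, and it assumes the ISSM src/m modules are importable:

import numpy as np
from WriteData import WriteData

with open('model.bin', 'wb') as fid:
    #a scalar marshalled as a Boolean record (record length + code + one int)
    WriteData(fid, 'md.settings', 'data', 1, 'name', 'md.settings.lowmem', 'format', 'Boolean')
    #a 2x2 matrix marshalled as a DoubleMat record; 'mattype' is an ISSM
    #convention telling the solver how to interpret the matrix
    WriteData(fid, 'md.mesh', 'data', np.array([[1., 2.], [3., 4.]]),
              'name', 'md.mesh.coords', 'format', 'DoubleMat', 'mattype', 1)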
issm/trunk-jpl/src/m/solve/parseresultsfromdisk.py
r23716 r23772
4 4 import results as resultsclass
5 5 
6 def parseresultsfromdisk(md,filename,iosplit):
7 	if iosplit:
8 		saveres=parseresultsfromdiskiosplit(md,filename)
9 	else:
10 		saveres=parseresultsfromdiskioserial(md,filename)
11 	return saveres
12 
13 def parseresultsfromdiskioserial(md,filename): # {{{
14 	#Open file
15 	try:
16 		fid=open(filename,'rb')
17 	except IOError as e:
18 		raise IOError("loadresultsfromdisk error message: could not open '{}' for binary reading.".format(filename))
19 
20 	#initialize results:
21 	saveres=[]
22 
23 	#Read fields until the end of the file.
24 	loadres=ReadData(fid,md)
25 
26 	counter=0
27 	check_nomoresteps=0
28 	step=loadres['step']
29 
30 	while loadres:
31 		#check that the new result does not add a step, which would be an error:
32 		if check_nomoresteps:
33 			if loadres['step']>=1:
34 				raise TypeError("parsing results for a steady-state core, which incorporates transient results!")
35 
36 		#Check step, increase counter if this is a new step
37 		if(step!=loadres['step'] and loadres['step']>1):
38 			counter = counter + 1
39 			step = loadres['step']
40 
41 		#Add result
42 		if loadres['step']==0:
43 			#if we have a step = 0, this is a steady state solution, don't expect more steps.
44 			index = 0;
45 			check_nomoresteps=1
46 		elif loadres['step']==1:
47 			index = 0
48 		else:
49 			index = counter;
50 
51 		if index > len(saveres)-1:
52 			for i in range(len(saveres)-1,index-1):
53 				saveres.append(None)
54 			saveres.append(resultsclass.results())
55 		elif saveres[index] is None:
56 			saveres[index]=resultsclass.results()
57 
58 		#Get time and step
59 		if loadres['step'] != -9999.:
60 			saveres[index].__dict__['step']=loadres['step']
61 		if loadres['time'] != -9999.:
62 			saveres[index].__dict__['time']=loadres['time']
63 
64 		#Add result
65 		saveres[index].__dict__[loadres['fieldname']]=loadres['field']
66 
67 		#read next result
68 		loadres=ReadData(fid,md)
69 
70 	fid.close()
71 
72 	return saveres
73 # }}}
74 def parseresultsfromdiskiosplit(md,filename): # {{{
75 
76 	#Open file
77 	try:
78 		fid=open(filename,'rb')
79 	except IOError as e:
80 		raise IOError("loadresultsfromdisk error message: could not open '{}' for binary reading.".format(filename))
81 
82 	saveres=[]
83 
84 	#if we have done split I/O, ie, we have results that are fragmented across patches,
85 	#do a first pass, and figure out the structure of results
86 	loadres=ReadDataDimensions(fid)
87 	while loadres:
88 
89 		#Get time and step
90 		if loadres['step'] > len(saveres):
91 			for i in range(len(saveres),loadres['step']-1):
92 				saveres.append(None)
93 			saveres.append(resultsclass.results())
94 		setattr(saveres[loadres['step']-1],'step',loadres['step'])
95 		setattr(saveres[loadres['step']-1],'time',loadres['time'])
96 
97 		#Add result
98 		setattr(saveres[loadres['step']-1],loadres['fieldname'],float('NaN'))
99 
100 		#read next result
101 		loadres=ReadDataDimensions(fid)
102 
103 	#do a second pass, and figure out the size of the patches
104 	fid.seek(0) #rewind
105 	loadres=ReadDataDimensions(fid)
106 	while loadres:
107 
108 		#read next result
109 		loadres=ReadDataDimensions(fid)
110 
111 	#third pass, this time to read the real information
112 	fid.seek(0) #rewind
113 	loadres=ReadData(fid,md)
114 	while loadres:
115 
116 		#Get time and step
117 		if loadres['step']> len(saveres):
118 			for i in range(len(saveres),loadres['step']-1):
119 				saveres.append(None)
120 			saveres.append(saveresclass.saveres())
121 		setattr(saveres[loadres['step']-1],'step',loadres['step'])
122 		setattr(saveres[loadres['step']-1],'time',loadres['time'])
123 
124 		#Add result
125 		setattr(saveres[loadres['step']-1],loadres['fieldname'],loadres['field'])
126 
127 		#read next result
128 		loadres=ReadData(fid,md)
129 
130 	#close file
131 	fid.close()
132 
133 	return saveres
134 # }}}
135 
136 def ReadData(fid,md): # {{{
137 	"""
138 	READDATA - ...
139 
140 	Usage:
141 		field=ReadData(fid,md)
142 	"""
143 
144 	#read field
145 	try:
146 		length=struct.unpack('i',fid.read(struct.calcsize('i')))[0]
147 		fieldname=struct.unpack('{}s'.format(length),fid.read(length))[0][:-1]
148 		fieldname=fieldname.decode() #strings are booleans when stored so need to be converted back
149 		time=struct.unpack('d',fid.read(struct.calcsize('d')))[0]
150 		step=struct.unpack('i',fid.read(struct.calcsize('i')))[0]
151 		datatype=struct.unpack('i',fid.read(struct.calcsize('i')))[0]
152 		M=struct.unpack('i',fid.read(struct.calcsize('i')))[0]
153 		if datatype==1:
154 			field=np.array(struct.unpack('{}d'.format(M),fid.read(M*struct.calcsize('d'))),dtype=float)
155 
156 		elif datatype==2:
157 			field=struct.unpack('{}s'.format(M),fid.read(M))[0][:-1]
158 			field=field.decode()
159 
160 		elif datatype==3:
161 			N=struct.unpack('i',fid.read(struct.calcsize('i')))[0]
162 			# field=transpose(fread(fid,[N M],'double'));
163 			field=np.zeros(shape=(M,N),dtype=float)
164 			for i in range(M):
165 				field[i,:]=struct.unpack('{}d'.format(N),fid.read(N*struct.calcsize('d')))
166 
167 		elif datatype==4:
168 			N=struct.unpack('i',fid.read(struct.calcsize('i')))[0]
169 			# field=transpose(fread(fid,[N M],'int'));
170 			field=np.zeros(shape=(M,N),dtype=int)
171 			for i in range(M):
172 				field[i,:]=struct.unpack('{}i'.format(N),fid.read(N*struct.calcsize('i')))
173 
174 		else:
175 			raise TypeError("cannot read data of datatype {}".format(datatype))
176 
177 		#Process units here FIXME: this should not be done here!
178 		yts=md.constants.yts
179 		if fieldname=='BalancethicknessThickeningRate':
180 			field = field*yts
181 		elif fieldname=='HydrologyWaterVx':
182 			field = field*yts
183 		elif fieldname=='HydrologyWaterVy':
184 			field = field*yts
185 		elif fieldname=='Vx':
186 			field = field*yts
187 		elif fieldname=='Vy':
188 			field = field*yts
189 		elif fieldname=='Vz':
190 			field = field*yts
191 		elif fieldname=='Vel':
192 			field = field*yts
193 		elif fieldname=='BasalforcingsGroundediceMeltingRate':
194 			field = field*yts
195 		elif fieldname=='BasalforcingsFloatingiceMeltingRate':
196 			field = field*yts
197 		elif fieldname=='TotalFloatingBmb':
198 			field = field/10.**12*yts #(GigaTon/year)
199 		elif fieldname=='TotalFloatingBmbScaled':
200 			field = field/10.**12*yts #(GigaTon/year)
201 		elif fieldname=='TotalGroundedBmb':
202 			field = field/10.**12*yts #(GigaTon/year)
203 		elif fieldname=='TotalGroundedBmbScaled':
204 			field = field/10.**12*yts #(GigaTon/year)
205 		elif fieldname=='TotalSmb':
206 			field = field/10.**12*yts #(GigaTon/year)
207 		elif fieldname=='TotalSmbScaled':
208 			field = field/10.**12*yts #(GigaTon/year)
209 		elif fieldname=='SmbMassBalance':
210 			field = field*yts
211 		elif fieldname=='SmbPrecipitation':
212 			field = field*yts
213 		elif fieldname=='SmbRunoff':
214 			field = field*yts
215 		elif fieldname=='SmbEvaporation':
216 			field = field*yts;
217 		elif fieldname=='SmbRefreeze':
218 			field = field*yts;
219 		elif fieldname=='SmbEC':
220 			field = field*yts
221 		elif fieldname=='SmbAccumulation':
222 			field = field*yts
223 		elif fieldname=='SmbMelt':
224 			field = field*yts
225 		elif fieldname=='SmbMAdd':
226 			field = field*yts
227 		elif fieldname=='CalvingCalvingrate':
228 			field = field*yts
229 		elif fieldname=='LoveKernelsReal' or fieldname=='LoveKernelsImag':
230 			nlayer = md.materials.numlayers
231 			degmax = md.love.sh_nmax
232 			nfreq = md.love.nfreq
233 			#for numpy 1.8+ only
234 			#temp_field = np.full((degmax+1,nfreq,nlayer+1,6),0.0)
235 			temp_field = np.empty((degmax+1,nfreq,nlayer+1,6))
236 			temp_field.fill(0.0)
237 			for ii in range(degmax+1):
238 				for jj in range(nfreq):
239 					for kk in range(nlayer+1):
240 						ll = ii*(nlayer+1)*6 + (kk*6+1)
241 						for mm in range(6):
242 							temp_field[ii,jj,kk,mm] = field[ll+mm-1,jj]
243 			field=temp_field
244 
245 		if time !=-9999:
246 			time = time/yts
247 
248 		saveres=OrderedDict()
249 		saveres['fieldname']=fieldname
250 		saveres['time']=time
251 		saveres['step']=step
252 		saveres['field']=field
253 
254 	except struct.error as e:
255 		saveres=None
256 
257 	return saveres
258 # }}}
6 
7 def parseresultsfromdisk(md, filename, iosplit):
8     if iosplit:
9         saveres = parseresultsfromdiskiosplit(md, filename)
10     else:
11         saveres = parseresultsfromdiskioserial(md, filename)
12     return saveres
13 
14 
15 def parseresultsfromdiskioserial(md, filename): # {{{
16     #Open file
17     try:
18         fid = open(filename, 'rb')
19     except IOError as e:
20         raise IOError("loadresultsfromdisk error message: could not open '{}' for binary reading.".format(filename))
21 
22     #initialize results:
23     saveres = []
24 
25     #Read fields until the end of the file.
26     loadres = ReadData(fid, md)
27 
28     counter = 0
29     check_nomoresteps = 0
30     step = loadres['step']
31 
32     while loadres:
33         #check that the new result does not add a step, which would be an error:
34         if check_nomoresteps:
35             if loadres['step'] >= 1:
36                 raise TypeError("parsing results for a steady-state core, which incorporates transient results!")
37 
38         #Check step, increase counter if this is a new step
39         if(step != loadres['step'] and loadres['step'] > 1):
40             counter = counter + 1
41             step = loadres['step']
42 
43         #Add result
44         if loadres['step'] == 0:
45             #if we have a step = 0, this is a steady state solution, don't expect more steps.
46             index = 0
47             check_nomoresteps = 1
48         elif loadres['step'] == 1:
49             index = 0
50         else:
51             index = counter
52 
53         if index > len(saveres) - 1:
54             for i in range(len(saveres) - 1, index - 1):
55                 saveres.append(None)
56             saveres.append(resultsclass.results())
57         elif saveres[index] is None:
58             saveres[index] = resultsclass.results()
59 
60         #Get time and step
61         if loadres['step'] != -9999.:
62             saveres[index].__dict__['step'] = loadres['step']
63         if loadres['time'] != -9999.:
64             saveres[index].__dict__['time'] = loadres['time']
65 
66         #Add result
67         saveres[index].__dict__[loadres['fieldname']] = loadres['field']
68 
69         #read next result
70         loadres = ReadData(fid, md)
71 
72     fid.close()
73 
74     return saveres
75 # }}}
76 
77 
78 def parseresultsfromdiskiosplit(md, filename): # {{{
79     #Open file
80     try:
81         fid = open(filename, 'rb')
82     except IOError as e:
83         raise IOError("loadresultsfromdisk error message: could not open '{}' for binary reading.".format(filename))
84 
85     saveres = []
86 
87     #if we have done split I/O, ie, we have results that are fragmented across patches,
88     #do a first pass, and figure out the structure of results
89     loadres = ReadDataDimensions(fid)
90     while loadres:
91 
92         #Get time and step
93         if loadres['step'] > len(saveres):
94             for i in range(len(saveres), loadres['step'] - 1):
95                 saveres.append(None)
96             saveres.append(resultsclass.results())
97         setattr(saveres[loadres['step'] - 1], 'step', loadres['step'])
98         setattr(saveres[loadres['step'] - 1], 'time', loadres['time'])
99 
100         #Add result
101         setattr(saveres[loadres['step'] - 1], loadres['fieldname'], float('NaN'))
102 
103         #read next result
104         loadres = ReadDataDimensions(fid)
105 
106     #do a second pass, and figure out the size of the patches
107     fid.seek(0) #rewind
108     loadres = ReadDataDimensions(fid)
109     while loadres:
110 
111         #read next result
112         loadres = ReadDataDimensions(fid)
113 
114     #third pass, this time to read the real information
115     fid.seek(0) #rewind
116     loadres = ReadData(fid, md)
117     while loadres:
118 
119         #Get time and step
120         if loadres['step'] > len(saveres):
121             for i in range(len(saveres), loadres['step'] - 1):
122                 saveres.append(None)
123             saveres.append(resultsclass.results())
124         setattr(saveres[loadres['step'] - 1], 'step', loadres['step'])
125         setattr(saveres[loadres['step'] - 1], 'time', loadres['time'])
126 
127         #Add result
128         setattr(saveres[loadres['step'] - 1], loadres['fieldname'], loadres['field'])
129 
130         #read next result
131         loadres = ReadData(fid, md)
132 
133     #close file
134     fid.close()
135 
136     return saveres
137 # }}}
138 
139 
140 def ReadData(fid, md): # {{{
141     """
142     READDATA - read a result field from a binary file
143 
144     Usage:
145         field = ReadData(fid, md)
146     """
147 
148     #read field
149     try:
150         length = struct.unpack('i', fid.read(struct.calcsize('i')))[0]
151         fieldname = struct.unpack('{}s'.format(length), fid.read(length))[0][:-1]
152         fieldname = fieldname.decode() #strings are binaries when stored so need to be converted back
153         time = struct.unpack('d', fid.read(struct.calcsize('d')))[0]
154         step = struct.unpack('i', fid.read(struct.calcsize('i')))[0]
155         datatype = struct.unpack('i', fid.read(struct.calcsize('i')))[0]
156         M = struct.unpack('i', fid.read(struct.calcsize('i')))[0]
157         if datatype == 1:
158             field = np.array(struct.unpack('{}d'.format(M), fid.read(M * struct.calcsize('d'))), dtype=float)
159 
160         elif datatype == 2:
161             field = struct.unpack('{}s'.format(M), fid.read(M))[0][:-1]
162             field = field.decode()
163 
164         elif datatype == 3:
165             N = struct.unpack('i', fid.read(struct.calcsize('i')))[0]
166             #field = transpose(fread(fid, [N M], 'double'));
167             field = np.zeros(shape=(M, N), dtype=float)
168             for i in range(M):
169                 field[i, :] = struct.unpack('{}d'.format(N), fid.read(N * struct.calcsize('d')))
170 
171         elif datatype == 4:
172             N = struct.unpack('i', fid.read(struct.calcsize('i')))[0]
173             # field = transpose(fread(fid, [N M], 'int'));
174             field = np.zeros(shape=(M, N), dtype=int)
175             for i in range(M):
176                 field[i, :] = struct.unpack('{}i'.format(N), fid.read(N * struct.calcsize('i')))
177 
178         else:
179             raise TypeError("cannot read data of datatype {}".format(datatype))
180 
181         #Process units here FIXME: this should not be done here!
182         yts = md.constants.yts
183         if fieldname == 'BalancethicknessThickeningRate':
184             field = field * yts
185         elif fieldname == 'HydrologyWaterVx':
186             field = field * yts
187         elif fieldname == 'HydrologyWaterVy':
188             field = field * yts
189         elif fieldname == 'Vx':
190             field = field * yts
191         elif fieldname == 'Vy':
192             field = field * yts
193         elif fieldname == 'Vz':
194             field = field * yts
195         elif fieldname == 'Vel':
196             field = field * yts
197         elif fieldname == 'BasalforcingsGroundediceMeltingRate':
198             field = field * yts
199         elif fieldname == 'BasalforcingsFloatingiceMeltingRate':
200             field = field * yts
201         elif fieldname == 'TotalFloatingBmb':
202             field = field / 10.**12 * yts #(GigaTon/year)
203         elif fieldname == 'TotalFloatingBmbScaled':
204             field = field / 10.**12 * yts #(GigaTon/year)
205         elif fieldname == 'TotalGroundedBmb':
206             field = field / 10.**12 * yts #(GigaTon/year)
207         elif fieldname == 'TotalGroundedBmbScaled':
208             field = field / 10.**12 * yts #(GigaTon/year)
209         elif fieldname == 'TotalSmb':
210             field = field / 10.**12 * yts #(GigaTon/year)
211         elif fieldname == 'TotalSmbScaled':
212             field = field / 10.**12 * yts #(GigaTon/year)
213         elif fieldname == 'SmbMassBalance':
214             field = field * yts
215         elif fieldname == 'SmbPrecipitation':
216             field = field * yts
217         elif fieldname == 'SmbRunoff':
218             field = field * yts
219         elif fieldname == 'SmbEvaporation':
220             field = field * yts
221         elif fieldname == 'SmbRefreeze':
222             field = field * yts
223         elif fieldname == 'SmbEC':
224             field = field * yts
225         elif fieldname == 'SmbAccumulation':
226             field = field * yts
227         elif fieldname == 'SmbMelt':
228             field = field * yts
229         elif fieldname == 'SmbMAdd':
230             field = field * yts
231         elif fieldname == 'CalvingCalvingrate':
232             field = field * yts
233         elif fieldname == 'LoveKernelsReal' or fieldname == 'LoveKernelsImag':
234             nlayer = md.materials.numlayers
235             degmax = md.love.sh_nmax
236             nfreq = md.love.nfreq
237             #for numpy 1.8+ only
238             #temp_field = np.full((degmax+1, nfreq, nlayer+1, 6), 0.0)
239             temp_field = np.empty((degmax + 1, nfreq, nlayer + 1, 6))
240             temp_field.fill(0.0)
241             for ii in range(degmax + 1):
242                 for jj in range(nfreq):
243                     for kk in range(nlayer + 1):
244                         ll = ii * (nlayer + 1) * 6 + (kk * 6 + 1)
245                         for mm in range(6):
246                             temp_field[ii, jj, kk, mm] = field[ll + mm - 1, jj]
247             field = temp_field
248 
249         if time != -9999:
250             time = time / yts
251 
252         saveres = OrderedDict()
253         saveres['fieldname'] = fieldname
254         saveres['time'] = time
255         saveres['step'] = step
256         saveres['field'] = field
257 
258     except struct.error as e:
259         saveres = None
260 
261     return saveres
262 # }}}
263 
264 
259 265 def ReadDataDimensions(fid): # {{{
260 	"""
261 	READDATADIMENSIONS - read data dimensions, step and time, but not the data itself.
262 
263 	Usage:
264 		field=ReadDataDimensions(fid)
265 	"""
266 
267 	#read field
268 	try:
269 		length=struct.unpack('i',fid.read(struct.calcsize('i')))[0]
270 		fieldname=struct.unpack('{}s'.format(length),fid.read(length))[0][:-1]
271 		time=struct.unpack('d',fid.read(struct.calcsize('d')))[0]
272 		step=struct.unpack('i',fid.read(struct.calcsize('i')))[0]
273 		dtattype=struct.unpack('i',fid.read(struct.calcsize('i')))[0]
274 		M=struct.unpack('i',fid.read(struct.calcsize('i')))[0]
275 		N=1 #default
276 		if datatype==1:
277 			fid.seek(M*8,1)
278 		elif datatype==2:
279 			fid.seek(M,1)
280 		elif datatype==3:
281 			N=struct.unpack('i',fid.read(struct.calcsize('i')))[0]
282 			fid.seek(N*M*8,1)
283 		else:
284 			raise TypeError("cannot read data of datatype {}".format(datatype))
285 
286 		saveres=OrderedDict()
287 		saveres['fieldname']=fieldname
288 		saveres['time']=time
289 		saveres['step']=step
290 		saveres['M']=M
291 		saveres['N']=N
292 
293 	except struct.error as e:
294 		saveres=None
295 
296 	return saveres
297 # }}}
266     """
267     READDATADIMENSIONS - read data dimensions, step and time, but not the data itself.
268 
269     Usage:
270         field = ReadDataDimensions(fid)
271     """
272 
273     #read field
274     try:
275         length = struct.unpack('i', fid.read(struct.calcsize('i')))[0]
276         fieldname = struct.unpack('{}s'.format(length), fid.read(length))[0][:-1]
277         time = struct.unpack('d', fid.read(struct.calcsize('d')))[0]
278         step = struct.unpack('i', fid.read(struct.calcsize('i')))[0]
279         datatype = struct.unpack('i', fid.read(struct.calcsize('i')))[0]
280         M = struct.unpack('i', fid.read(struct.calcsize('i')))[0]
281         N = 1 #default
282         if datatype == 1:
283             fid.seek(M * 8, 1)
284         elif datatype == 2:
285             fid.seek(M, 1)
286         elif datatype == 3:
287             N = struct.unpack('i', fid.read(struct.calcsize('i')))[0]
288             fid.seek(N * M * 8, 1)
289         else:
290             raise TypeError("cannot read data of datatype {}".format(datatype))
291 
292         saveres = OrderedDict()
293         saveres['fieldname'] = fieldname
294         saveres['time'] = time
295         saveres['step'] = step
296         saveres['M'] = M
297         saveres['N'] = N
298 
299     except struct.error as Err:
300         print(Err)
301         saveres = None
302 
303     return saveres
304 # }}}
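For reference, both readers above walk the same self-describing record layout that WriteData produces: a 4-byte name length, the name itself (stored with a trailing NUL, hence the [:-1]), a double for time and an int for step (both -9999 for non-transient results), an int datatype code, then the dimensions and payload. A minimal standalone sketch of reading a single datatype-1 record (a vector of doubles), without the unit conversions that ReadData applies:

import struct

def read_one_vector_record(fid):
    isize = struct.calcsize('i')
    dsize = struct.calcsize('d')
    length = struct.unpack('i', fid.read(isize))[0] #field name length, incl. trailing NUL
    fieldname = fid.read(length)[:-1].decode() #drop the trailing NUL byte
    time = struct.unpack('d', fid.read(dsize))[0] #-9999. if not a transient result
    step = struct.unpack('i', fid.read(isize))[0] #-9999 if not a transient result
    datatype = struct.unpack('i', fid.read(isize))[0]
    M = struct.unpack('i', fid.read(isize))[0]
    if datatype != 1:
        raise TypeError('this sketch only handles datatype 1 (vector of doubles)')
    field = struct.unpack('{}d'.format(M), fid.read(M * dsize))
    return fieldname, time, step, field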