Changeset 23870
- Timestamp:
- 04/19/19 06:51:58 (6 years ago)
- Location:
- issm/trunk-jpl/src/m
- Files:
-
- 8 edited
Legend:
- Unmodified
- Added
- Removed
-
issm/trunk-jpl/src/m/classes/friction.py
r23536 r23870 1 import numpy as np2 1 from fielddisplay import fielddisplay 3 2 from project3d import project3d … … 5 4 from WriteData import WriteData 6 5 6 7 7 class friction(object): 8 9 8 """ 9 FRICTION class definition 10 10 11 12 13 11 Usage: 12 friction=friction() 13 """ 14 14 15 def __init__(self): # {{{ 16 self.coefficient = float('NaN') 17 self.p = float('NaN') 18 self.q = float('NaN') 19 self.coupling = 0 20 self.effective_pressure = float('NaN') 21 #set defaults 22 self.setdefaultparameters() 15 def __init__(self): # {{{ 16 self.coefficient = float('NaN') 17 self.p = float('NaN') 18 self.q = float('NaN') 19 self.coupling = 0 20 self.effective_pressure = float('NaN') 21 #set defaults 22 self.setdefaultparameters() 23 #}}} 23 24 24 #}}} 25 def __repr__(self): # {{{ 26 string="Basal shear stress parameters: Sigma_b = coefficient^2 * Neff ^r * |u_b|^(s-1) * u_b,\n(effective stress Neff=rho_ice*g*thickness+rho_water*g*base, r=q/p and s=1/p)" 25 def __repr__(self): # {{{ 26 string = "Basal shear stress parameters: Sigma_b = coefficient^2 * Neff ^r * |u_b|^(s-1) * u_b,\n(effective stress Neff=rho_ice*g*thickness+rho_water*g*base, r=q/p and s=1/p)" 27 27 28 string="%s\n%s"%(string,fielddisplay(self,"coefficient","friction coefficient [SI]")) 29 string="%s\n%s"%(string,fielddisplay(self,"p","p exponent")) 30 string="%s\n%s"%(string,fielddisplay(self,"q","q exponent")) 31 string="%s\n%s"%(string,fielddisplay(self,'coupling','Coupling flag 0: uniform sheet (negative pressure ok, default), 1: ice pressure only, 2: water pressure assuming uniform sheet (no negative pressure), 3: use provided effective_pressure, 4: used coupled model (not implemented yet)')) 32 string="%s\n%s"%(string,fielddisplay(self,'effective_pressure','Effective Pressure for the forcing if not coupled [Pa]')) 33 return string 34 #}}} 35 def extrude(self,md): # {{{ 36 self.coefficient=project3d(md,'vector',self.coefficient,'type','node','layer',1) 37 
self.p=project3d(md,'vector',self.p,'type','element') 38 self.q=project3d(md,'vector',self.q,'type','element') 39 #if self.coupling==0: #doesnt work with empty loop, so just skip it? 40 if self.coupling in[3,4]: 41 self.effective_pressure=project3d(md,'vector',self.effective_pressure,'type','node','layer',1) 42 elif self.coupling > 4: 43 raise ValueError('md.friction.coupling larger than 4, not supported yet') 44 return self 45 #}}} 46 def setdefaultparameters(self): # {{{ 47 return self 48 #}}} 49 def checkconsistency(self,md,solution,analyses): # {{{ 28 string = "%s\n%s" % (string, fielddisplay(self, "coefficient", "friction coefficient [SI]")) 29 string = "%s\n%s" % (string, fielddisplay(self, "p", "p exponent")) 30 string = "%s\n%s" % (string, fielddisplay(self, "q", "q exponent")) 31 string = "%s\n%s" % (string, fielddisplay(self, 'coupling', 'Coupling flag 0: uniform sheet (negative pressure ok, default), 1: ice pressure only, 2: water pressure assuming uniform sheet (no negative pressure), 3: use provided effective_pressure, 4: used coupled model (not implemented yet)')) 32 string = "%s\n%s" % (string, fielddisplay(self, 'effective_pressure', 'Effective Pressure for the forcing if not coupled [Pa]')) 33 return string 34 #}}} 50 35 51 #Early return 52 if 'StressbalanceAnalysis' not in analyses and 'ThermalAnalysis' not in analyses: 53 return md 36 def extrude(self, md): # {{{ 37 self.coefficient = project3d(md, 'vector', self.coefficient, 'type', 'node', 'layer', 1) 38 self.p = project3d(md, 'vector', self.p, 'type', 'element') 39 self.q = project3d(md, 'vector', self.q, 'type', 'element') 40 #if self.coupling==0: #doesnt work with empty loop, so just skip it? 
41 if self.coupling in[3, 4]: 42 self.effective_pressure = project3d(md, 'vector', self.effective_pressure, 'type', 'node', 'layer', 1) 43 elif self.coupling > 4: 44 raise ValueError('md.friction.coupling larger than 4, not supported yet') 45 return self 46 #}}} 54 47 55 md = checkfield(md,'fieldname','friction.coefficient','timeseries',1,'NaN',1,'Inf',1) 56 md = checkfield(md,'fieldname','friction.q','NaN',1,'Inf',1,'size',[md.mesh.numberofelements]) 57 md = checkfield(md,'fieldname','friction.p','NaN',1,'Inf',1,'size',[md.mesh.numberofelements]) 58 md = checkfield(md,'fieldname','friction.coupling','numel',[1],'values',[0,1,2,3,4]) 59 if self.coupling==3: 60 md = checkfield(md,'fieldname','friction.effective_pressure','NaN',1,'Inf',1,'timeseries',1) 61 elif self.coupling > 4: 62 raise ValueError('md.friction.coupling larger than 4, not supported yet') 63 return md 64 # }}} 65 def marshall(self,prefix,md,fid): # {{{ 66 WriteData(fid,prefix,'name','md.friction.law','data',1,'format','Integer') 67 WriteData(fid,prefix,'object',self,'fieldname','coefficient','format','DoubleMat','mattype',1) 68 WriteData(fid,prefix,'object',self,'fieldname','p','format','DoubleMat','mattype',2) 69 WriteData(fid,prefix,'object',self,'fieldname','q','format','DoubleMat','mattype',2) 70 WriteData(fid,prefix,'class','friction','object',self,'fieldname','coupling','format','Integer') 71 if self.coupling in[3,4]: 72 WriteData(fid,prefix,'class','friction','object',self,'fieldname','effective_pressure','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts) 73 elif self.coupling > 4: 74 raise ValueError('md.friction.coupling larger than 4, not supported yet') 75 # }}} 48 def setdefaultparameters(self): # {{{ 49 return self 50 #}}} 51 52 def checkconsistency(self, md, solution, analyses): # {{{ 53 54 #Early return 55 if 'StressbalanceAnalysis' not in analyses and 'ThermalAnalysis' not in analyses: 56 return md 57 58 md = checkfield(md, 
'fieldname', 'friction.coefficient', 'timeseries', 1, 'NaN', 1, 'Inf', 1) 59 md = checkfield(md, 'fieldname', 'friction.q', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofelements]) 60 md = checkfield(md, 'fieldname', 'friction.p', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofelements]) 61 md = checkfield(md, 'fieldname', 'friction.coupling', 'numel', [1], 'values', [0, 1, 2, 3, 4]) 62 if self.coupling == 3: 63 md = checkfield(md, 'fieldname', 'friction.effective_pressure', 'NaN', 1, 'Inf', 1, 'timeseries', 1) 64 elif self.coupling > 4: 65 raise ValueError('md.friction.coupling larger than 4, not supported yet') 66 return md 67 # }}} 68 69 def marshall(self, prefix, md, fid): # {{{ 70 WriteData(fid, prefix, 'name', 'md.friction.law', 'data', 1, 'format', 'Integer') 71 WriteData(fid, prefix, 'object', self, 'fieldname', 'coefficient', 'format', 'DoubleMat', 'mattype', 1) 72 WriteData(fid, prefix, 'object', self, 'fieldname', 'p', 'format', 'DoubleMat', 'mattype', 2) 73 WriteData(fid, prefix, 'object', self, 'fieldname', 'q', 'format', 'DoubleMat', 'mattype', 2) 74 WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'coupling', 'format', 'Integer') 75 if self.coupling in[3, 4]: 76 WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'effective_pressure', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', md.constants.yts) 77 elif self.coupling > 4: 78 raise ValueError('md.friction.coupling larger than 4, not supported yet') 79 # }}} -
issm/trunk-jpl/src/m/classes/hydrologydc.py
r23716 r23870 5 5 from WriteData import WriteData 6 6 7 7 8 class hydrologydc(object): 8 """ 9 Hydrologydc class definition 10 11 Usage: 12 hydrologydc=hydrologydc(); 13 """ 14 15 def __init__(self): # {{{ 16 self.water_compressibility = 0 17 self.isefficientlayer = 0 18 self.penalty_factor = 0 19 self.penalty_lock = 0 20 self.rel_tol = 0 21 self.max_iter = 0 22 self.steps_per_step = 0 23 self.sedimentlimit_flag = 0 24 self.sedimentlimit = 0 25 self.transfer_flag = 0 26 self.unconfined_flag = 0 27 self.leakage_factor = 0 28 self.basal_moulin_input = np.nan 29 self.requested_outputs = [] 30 31 self.spcsediment_head = np.nan 32 self.mask_thawed_node = np.nan 33 self.sediment_transmitivity = np.nan 34 self.sediment_compressibility = 0 35 self.sediment_porosity = 0 36 self.sediment_thickness = 0 37 38 self.spcepl_head = np.nan 39 self.mask_eplactive_node = np.nan 40 self.epl_compressibility = 0 41 self.epl_porosity = 0 42 self.epl_initial_thickness = 0 43 self.epl_colapse_thickness = 0 44 self.epl_thick_comp = 0 45 self.epl_max_thickness = 0 46 self.epl_conductivity = 0 47 self.eplflip_lock = 0 48 49 #set defaults 50 self.setdefaultparameters() 51 #}}} 52 def __repr__(self): # {{{ 53 string=' hydrology Dual Porous Continuum Equivalent parameters:' 54 string=' - general parameters' 55 string="%s\n%s"%(string,fielddisplay(self,'water_compressibility','compressibility of water [Pa^-1]')) 56 string="%s\n%s"%(string,fielddisplay(self,'isefficientlayer','do we use an efficient drainage system [1: true 0: false]')) 57 string="%s\n%s"%(string,fielddisplay(self,'penalty_factor','exponent of the value used in the penalisation method [dimensionless]')) 58 string="%s\n%s"%(string,fielddisplay(self,'penalty_lock','stabilize unstable constraints that keep zigzagging after n iteration (default is 0, no stabilization)')) 59 string="%s\n%s"%(string,fielddisplay(self,'rel_tol','tolerance of the nonlinear iteration for the transfer between layers [dimensionless]')) 60 
string="%s\n%s"%(string,fielddisplay(self,'max_iter','maximum number of nonlinear iteration')) 61 string="%s\n%s"%(string,fielddisplay(self,'steps_per_step','number of hydrology steps per time step')) 62 string="%s\n%s"%(string,fielddisplay(self,'basal_moulin_input','water flux at a given point [m3 s-1]')) 63 string="%s\n%s"%(string,fielddisplay(self,'requested_outputs','additional outputs requested')) 64 string="%s\n%s"%(string,fielddisplay(self,'sedimentlimit_flag','what kind of upper limit is applied for the inefficient layer')) 65 string="%s\n\t\t%s"%(string,'0: no limit') 66 string="%s\n\t\t%s"%(string,'1: user defined sedimentlimit') 67 string="%s\n\t\t%s"%(string,'2: hydrostatic pressure') 68 string="%s\n\t\t%s"%(string,'3: normal stress') 69 70 if self.sedimentlimit_flag==1: 71 string="%s\n%s"%(string,fielddisplay(self,'sedimentlimit','user defined upper limit for the inefficient layer [m]')) 72 73 string="%s\n%s"%(string,fielddisplay(self,'transfer_flag','what kind of transfer method is applied between the layers')) 74 string="%s\n\t\t%s"%(string,'0: no transfer') 75 string="%s\n\t\t%s"%(string,'1: constant leakage factor: leakage_factor') 76 77 if self.transfer_flag is 1: 78 string="%s\n%s"%(string,fielddisplay(self,'leakage_factor','user defined leakage factor [m]')) 79 80 string="%s\n%s"%(string,fielddisplay(self,'unconfined_flag','using an unconfined scheme or not (transitory)')) 81 string="%s\n\t\t%s"%(string,'0: Confined only') 82 string="%s\n\t\t%s"%(string,'1: Confined-Unconfined') 83 84 string="%s\n%s"%(string,' - for the sediment layer') 85 string="%s\n%s"%(string,fielddisplay(self,'spcsediment_head','sediment water head constraints (NaN means no constraint) [m above MSL]')) 86 string="%s\n%s"%(string,fielddisplay(self,'sediment_compressibility','sediment compressibility [Pa^-1]')) 87 string="%s\n%s"%(string,fielddisplay(self,'sediment_porosity','sediment [dimensionless]')) 88 
string="%s\n%s"%(string,fielddisplay(self,'sediment_thickness','sediment thickness [m]')) 89 string="%s\n%s"%(string,fielddisplay(self,'sediment_transmitivity','sediment transmitivity [m^2/s]')) 90 string="%s\n%s"%(string,fielddisplay(self,'mask_thawed_node','IDS is deactivaed (0) on frozen nodes')) 91 92 if self.isefficientlayer==1: 93 string="%s\n%s"%(string,' - for the epl layer') 94 string="%s\n%s"%(string,fielddisplay(self,'spcepl_head','epl water head constraints (NaN means no constraint) [m above MSL]')) 95 string="%s\n%s"%(string,fielddisplay(self,'mask_eplactive_node','active (1) or not (0) EPL')) 96 string="%s\n%s"%(string,fielddisplay(self,'epl_compressibility','epl compressibility [Pa^-1]')) 97 string="%s\n%s"%(string,fielddisplay(self,'epl_porosity','epl [dimensionless]')) 98 string="%s\n%s"%(string,fielddisplay(self,'epl_max_thickness','epl maximal thickness [m]')) 99 string="%s\n%s"%(string,fielddisplay(self,'epl_initial_thickness','epl initial thickness [m]')) 100 string="%s\n%s"%(string,fielddisplay(self,'epl_colapse_thickness','epl colapsing thickness [m]')) 101 string="%s\n%s"%(string,fielddisplay(self,'epl_thick_comp','epl thickness computation flag')) 102 string="%s\n%s"%(string,fielddisplay(self,'epl_conductivity','epl conductivity [m^2/s]')) 103 string="%s\n%s"%(string,fielddisplay(self,'eplflip_lock','lock epl activity to avoid flip-floping (default is 0, no stabilization)')) 104 return string 105 #}}} 106 def extrude(self,md): # {{{ 107 self.spcsediment_head=project3d(md,'vector',self.spcsediment_head,'type','node','layer',1) 108 self.sediment_transmitivity=project3d(md,'vector',self.sediment_transmitivity,'type','node','layer',1) 109 self.basal_moulin_input=project3d(md,'vector',self.basal_moulin_input,'type','node','layer',1) 110 self.mask_thawed_node=project3d(md,'vector',self.mask_thawed_node,'type','node','layer',1) 111 if self.isefficientlayer==1 : 112 self.spcepl_head=project3d(md,'vector',self.spcepl_head,'type','node','layer',1) 
113 self.mask_eplactive_node=project3d(md,'vector',self.mask_eplactive_node,'type','node','layer',1) 114 return self 115 #}}} 116 def setdefaultparameters(self): #{{{ 117 #Parameters from de Fleurian 2014 118 self.water_compressibility = 5.04e-10 119 self.isefficientlayer = 1 120 self.penalty_factor = 3 121 self.penalty_lock = 0 122 self.rel_tol = 1.0e-06 123 self.max_iter = 100 124 self.steps_per_step = 1 125 self.sedimentlimit_flag = 0 126 self.sedimentlimit = 0 127 self.transfer_flag = 0 128 self.unconfined_flag = 0 129 self.leakage_factor = 10.0 130 self.requested_outputs = ['default'] 131 132 self.sediment_compressibility = 1.0e-08 133 self.sediment_porosity = 0.4 134 self.sediment_thickness = 20.0 135 self.sediment_transmitivity = 8.0e-04 136 137 self.epl_compressibility = 1.0e-08 138 self.epl_conductivity = 8.0e-02 139 self.epl_porosity = 0.4 140 self.epl_initial_thickness = 1.0 141 self.epl_colapse_thickness = self.sediment_transmitivity/self.epl_conductivity 142 self.epl_thick_comp = 1 143 self.epl_max_thickness = 5.0 144 self.eplflip_lock = 0 145 146 return self 147 # }}} 148 149 def defaultoutputs(self,md): # {{{ 150 list = ['SedimentHeadHydrostep','SedimentHeadResidual','EffectivePressureHydrostep'] 151 if self.isefficientlayer==1: 152 list.extend(['EplHeadHydrostep','HydrologydcMaskEplactiveNode','HydrologydcMaskEplactiveElt','EplHeadSlopeX','EplHeadSlopeY','HydrologydcEplThicknessHydrostep']) 153 if self.steps_per_step>1: 154 list.extend(['EffectivePressure','SedimentHead']) 155 if self.isefficientlayer==1: 156 list.extend(['EplHead','HydrologydcEplThickness']) 157 return list 158 #}}} 159 160 def initialize(self,md): # {{{ 161 self.epl_colapse_thickness = self.sediment_transmitivity/self.epl_conductivity 162 if np.all(np.isnan(self.basal_moulin_input)): 163 self.basal_moulin_input=np.zeros((md.mesh.numberofvertices)) 164 print(" no hydrology.basal_moulin_input specified: values set as zero") 165 166 return self 167 # }}} 168 def 
checkconsistency(self,md,solution,analyses): #{{{ 169 170 #Early return 171 if 'HydrologyDCInefficientAnalysis' not in analyses and 'HydrologyDCEfficientAnalysis' not in analyses: 172 return md 173 174 md = checkfield(md,'fieldname','hydrology.water_compressibility','numel',[1],'>',0.) 175 md = checkfield(md,'fieldname','hydrology.isefficientlayer','numel',[1],'values',[0,1]) 176 md = checkfield(md,'fieldname','hydrology.penalty_factor','>',0.,'numel',[1]) 177 md = checkfield(md,'fieldname','hydrology.penalty_lock','>=',0.,'numel',[1]) 178 md = checkfield(md,'fieldname','hydrology.rel_tol','>',0.,'numel',[1]) 179 md = checkfield(md,'fieldname','hydrology.max_iter','>',0.,'numel',[1]) 180 md = checkfield(md,'fieldname','hydrology.steps_per_step','>',0.,'numel',[1]) 181 md = checkfield(md,'fieldname','hydrology.sedimentlimit_flag','numel',[1],'values',[0,1,2,3]) 182 md = checkfield(md,'fieldname','hydrology.transfer_flag','numel',[1],'values',[0,1]) 183 md = checkfield(md,'fieldname','hydrology.unconfined_flag','numel',[1],'values',[0,1]) 184 md = checkfield(md,'fieldname','hydrology.requested_outputs','stringrow',1) 185 186 if self.sedimentlimit_flag==1: 187 md = checkfield(md,'fieldname','hydrology.sedimentlimit','>',0.,'numel',[1]) 188 189 if self.transfer_flag==1: 190 md = checkfield(md,'fieldname','hydrology.leakage_factor','>',0.,'numel',[1]) 191 192 md = checkfield(md,'fieldname','hydrology.basal_moulin_input','NaN',1,'Inf',1,'timeseries',1) 193 md = checkfield(md,'fieldname','hydrology.spcsediment_head','Inf',1,'timeseries',1) 194 md = checkfield(md,'fieldname','hydrology.sediment_compressibility','>',0.,'numel',[1]) 195 md = checkfield(md,'fieldname','hydrology.sediment_porosity','>',0.,'numel',[1]) 196 md = checkfield(md,'fieldname','hydrology.sediment_thickness','>',0.,'numel',[1]) 197 md = checkfield(md,'fieldname','hydrology.sediment_transmitivity','>=',0,'size',[md.mesh.numberofvertices]) 198 md = 
checkfield(md,'fieldname','hydrology.mask_thawed_node','size',[md.mesh.numberofvertices],'values',[0,1]) 199 if self.isefficientlayer==1: 200 md = checkfield(md,'fieldname','hydrology.spcepl_head','Inf',1,'timeseries',1) 201 md = checkfield(md,'fieldname','hydrology.mask_eplactive_node','size',[md.mesh.numberofvertices],'values',[0,1]) 202 md = checkfield(md,'fieldname','hydrology.epl_compressibility','>',0.,'numel',[1]) 203 md = checkfield(md,'fieldname','hydrology.epl_porosity','>',0.,'numel',[1]) 204 md = checkfield(md,'fieldname','hydrology.epl_max_thickness','numel',[1],'>',0.) 205 md = checkfield(md,'fieldname','hydrology.epl_initial_thickness','numel',[1],'>',0.) 206 md = checkfield(md,'fieldname','hydrology.epl_colapse_thickness','numel',[1],'>',0.) 207 md = checkfield(md,'fieldname','hydrology.epl_thick_comp','numel',[1],'values',[0,1]) 208 md = checkfield(md,'fieldname','hydrology.eplflip_lock','>=',0.,'numel',[1]) 209 if self.epl_colapse_thickness > self.epl_initial_thickness: 210 md.checkmessage('Colapsing thickness for EPL larger than initial thickness') 211 md = checkfield(md,'fieldname','hydrology.epl_conductivity','numel',[1],'>',0.) 
212 # }}} 213 def marshall(self,prefix,md,fid): #{{{ 214 WriteData(fid,prefix,'name','md.hydrology.model','data',1,'format','Integer') 215 WriteData(fid,prefix,'object',self,'fieldname','water_compressibility','format','Double') 216 WriteData(fid,prefix,'object',self,'fieldname','isefficientlayer','format','Boolean') 217 WriteData(fid,prefix,'object',self,'fieldname','penalty_factor','format','Double') 218 WriteData(fid,prefix,'object',self,'fieldname','penalty_lock','format','Integer') 219 WriteData(fid,prefix,'object',self,'fieldname','rel_tol','format','Double') 220 WriteData(fid,prefix,'object',self,'fieldname','max_iter','format','Integer') 221 WriteData(fid,prefix,'object',self,'fieldname','steps_per_step','format','Integer') 222 WriteData(fid,prefix,'object',self,'fieldname','sedimentlimit_flag','format','Integer') 223 WriteData(fid,prefix,'object',self,'fieldname','transfer_flag','format','Integer') 224 WriteData(fid,prefix,'object',self,'fieldname','unconfined_flag','format','Integer') 225 if self.sedimentlimit_flag==1: 226 WriteData(fid,prefix,'object',self,'fieldname','sedimentlimit','format','Double') 227 228 if self.transfer_flag==1: 229 WriteData(fid,prefix,'object',self,'fieldname','leakage_factor','format','Double') 230 231 WriteData(fid,prefix,'object',self,'fieldname','basal_moulin_input','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts) 232 WriteData(fid,prefix,'object',self,'fieldname','spcsediment_head','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts) 233 WriteData(fid,prefix,'object',self,'fieldname','sediment_compressibility','format','Double') 234 WriteData(fid,prefix,'object',self,'fieldname','sediment_porosity','format','Double') 235 WriteData(fid,prefix,'object',self,'fieldname','sediment_thickness','format','Double') 236 WriteData(fid,prefix,'object',self,'fieldname','sediment_transmitivity','format','DoubleMat','mattype',1) 237 
WriteData(fid,prefix,'object',self,'fieldname','mask_thawed_node','format','DoubleMat','mattype',1) 238 239 if self.isefficientlayer==1: 240 WriteData(fid,prefix,'object',self,'fieldname','spcepl_head','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts) 241 WriteData(fid,prefix,'object',self,'fieldname','mask_eplactive_node','format','DoubleMat','mattype',1) 242 WriteData(fid,prefix,'object',self,'fieldname','epl_compressibility','format','Double') 243 WriteData(fid,prefix,'object',self,'fieldname','epl_porosity','format','Double') 244 WriteData(fid,prefix,'object',self,'fieldname','epl_max_thickness','format','Double') 245 WriteData(fid,prefix,'object',self,'fieldname','epl_initial_thickness','format','Double') 246 WriteData(fid,prefix,'object',self,'fieldname','epl_colapse_thickness','format','Double') 247 WriteData(fid,prefix,'object',self,'fieldname','epl_thick_comp','format','Integer') 248 WriteData(fid,prefix,'object',self,'fieldname','epl_conductivity','format','Double') 249 WriteData(fid,prefix,'object',self,'fieldname','eplflip_lock','format','Integer') 250 251 #process requested outputs 252 outputs = self.requested_outputs 253 indices = [i for i, x in enumerate(outputs) if x == 'default'] 254 if len(indices) > 0: 255 outputscopy=outputs[0:max(0,indices[0]-1)]+self.defaultoutputs(md)+outputs[indices[0]+1:] 256 outputs =outputscopy 257 WriteData(fid,prefix,'data',outputs,'name','md.hydrology.requested_outputs','format','StringArray') 258 # }}} 9 """ 10 Hydrologydc class definition 11 12 Usage: 13 hydrologydc=hydrologydc(); 14 """ 15 16 def __init__(self): # {{{ 17 self.water_compressibility = 0 18 self.isefficientlayer = 0 19 self.penalty_factor = 0 20 self.penalty_lock = 0 21 self.rel_tol = 0 22 self.max_iter = 0 23 self.steps_per_step = 0 24 self.sedimentlimit_flag = 0 25 self.sedimentlimit = 0 26 self.transfer_flag = 0 27 self.unconfined_flag = 0 28 self.leakage_factor = 0 29 self.basal_moulin_input = 
np.nan 30 self.requested_outputs = [] 31 32 self.spcsediment_head = np.nan 33 self.mask_thawed_node = np.nan 34 self.sediment_transmitivity = np.nan 35 self.sediment_compressibility = 0 36 self.sediment_porosity = 0 37 self.sediment_thickness = 0 38 39 self.spcepl_head = np.nan 40 self.mask_eplactive_node = np.nan 41 self.epl_compressibility = 0 42 self.epl_porosity = 0 43 self.epl_initial_thickness = 0 44 self.epl_colapse_thickness = 0 45 self.epl_thick_comp = 0 46 self.epl_max_thickness = 0 47 self.epl_conductivity = 0 48 self.eplflip_lock = 0 49 50 #set defaults 51 self.setdefaultparameters() 52 #}}} 53 54 def __repr__(self): # {{{ 55 string = ' hydrology Dual Porous Continuum Equivalent parameters:' 56 string = ' - general parameters' 57 string = "%s\n%s" % (string, fielddisplay(self, 'water_compressibility', 'compressibility of water [Pa^-1]')) 58 string = "%s\n%s" % (string, fielddisplay(self, 'isefficientlayer', 'do we use an efficient drainage system [1: true 0: false]')) 59 string = "%s\n%s" % (string, fielddisplay(self, 'penalty_factor', 'exponent of the value used in the penalisation method [dimensionless]')) 60 string = "%s\n%s" % (string, fielddisplay(self, 'penalty_lock', 'stabilize unstable constraints that keep zigzagging after n iteration (default is 0, no stabilization)')) 61 string = "%s\n%s" % (string, fielddisplay(self, 'rel_tol', 'tolerance of the nonlinear iteration for the transfer between layers [dimensionless]')) 62 string = "%s\n%s" % (string, fielddisplay(self, 'max_iter', 'maximum number of nonlinear iteration')) 63 string = "%s\n%s" % (string, fielddisplay(self, 'steps_per_step', 'number of hydrology steps per time step')) 64 string = "%s\n%s" % (string, fielddisplay(self, 'basal_moulin_input', 'water flux at a given point [m3 s-1]')) 65 string = "%s\n%s" % (string, fielddisplay(self, 'requested_outputs', 'additional outputs requested')) 66 string = "%s\n%s" % (string, fielddisplay(self, 'sedimentlimit_flag', 'what kind of upper limit 
is applied for the inefficient layer')) 67 string = "%s\n\t\t%s" % (string, '0: no limit') 68 string = "%s\n\t\t%s" % (string, '1: user defined sedimentlimit') 69 string = "%s\n\t\t%s" % (string, '2: hydrostatic pressure') 70 string = "%s\n\t\t%s" % (string, '3: normal stress') 71 72 if self.sedimentlimit_flag == 1: 73 string = "%s\n%s" % (string, fielddisplay(self, 'sedimentlimit', 'user defined upper limit for the inefficient layer [m]')) 74 75 string = "%s\n%s" % (string, fielddisplay(self, 'transfer_flag', 'what kind of transfer method is applied between the layers')) 76 string = "%s\n\t\t%s" % (string, '0: no transfer') 77 string = "%s\n\t\t%s" % (string, '1: constant leakage factor: leakage_factor') 78 79 if self.transfer_flag == 1: 80 string = "%s\n%s" % (string, fielddisplay(self, 'leakage_factor', 'user defined leakage factor [m]')) 81 82 string = "%s\n%s" % (string, fielddisplay(self, 'unconfined_flag', 'using an unconfined scheme or not (transitory)')) 83 string = "%s\n\t\t%s" % (string, '0: Confined only') 84 string = "%s\n\t\t%s" % (string, '1: Confined-Unconfined') 85 86 string = "%s\n%s" % (string, ' - for the sediment layer') 87 string = "%s\n%s" % (string, fielddisplay(self, 'spcsediment_head', 'sediment water head constraints (NaN means no constraint) [m above MSL]')) 88 string = "%s\n%s" % (string, fielddisplay(self, 'sediment_compressibility', 'sediment compressibility [Pa^-1]')) 89 string = "%s\n%s" % (string, fielddisplay(self, 'sediment_porosity', 'sediment [dimensionless]')) 90 string = "%s\n%s" % (string, fielddisplay(self, 'sediment_thickness', 'sediment thickness [m]')) 91 string = "%s\n%s" % (string, fielddisplay(self, 'sediment_transmitivity', 'sediment transmitivity [m^2/s]')) 92 string = "%s\n%s" % (string, fielddisplay(self, 'mask_thawed_node', 'IDS is deactivaed (0) on frozen nodes')) 93 94 if self.isefficientlayer == 1: 95 string = "%s\n%s" % (string, ' - for the epl layer') 96 string = "%s\n%s" % (string, fielddisplay(self, 
'spcepl_head', 'epl water head constraints (NaN means no constraint) [m above MSL]')) 97 string = "%s\n%s" % (string, fielddisplay(self, 'mask_eplactive_node', 'active (1) or not (0) EPL')) 98 string = "%s\n%s" % (string, fielddisplay(self, 'epl_compressibility', 'epl compressibility [Pa^-1]')) 99 string = "%s\n%s" % (string, fielddisplay(self, 'epl_porosity', 'epl [dimensionless]')) 100 string = "%s\n%s" % (string, fielddisplay(self, 'epl_max_thickness', 'epl maximal thickness [m]')) 101 string = "%s\n%s" % (string, fielddisplay(self, 'epl_initial_thickness', 'epl initial thickness [m]')) 102 string = "%s\n%s" % (string, fielddisplay(self, 'epl_colapse_thickness', 'epl colapsing thickness [m]')) 103 string = "%s\n%s" % (string, fielddisplay(self, 'epl_thick_comp', 'epl thickness computation flag')) 104 string = "%s\n%s" % (string, fielddisplay(self, 'epl_conductivity', 'epl conductivity [m^2/s]')) 105 string = "%s\n%s" % (string, fielddisplay(self, 'eplflip_lock', 'lock epl activity to avoid flip-floping (default is 0, no stabilization)')) 106 return string 107 108 def extrude(self, md): # {{{ 109 self.spcsediment_head = project3d(md, 'vector', self.spcsediment_head, 'type', 'node', 'layer', 1) 110 self.sediment_transmitivity = project3d(md, 'vector', self.sediment_transmitivity, 'type', 'node', 'layer', 1) 111 self.basal_moulin_input = project3d(md, 'vector', self.basal_moulin_input, 'type', 'node', 'layer', 1) 112 self.mask_thawed_node = project3d(md, 'vector', self.mask_thawed_node, 'type', 'node', 'layer', 1) 113 if self.isefficientlayer == 1: 114 self.spcepl_head = project3d(md, 'vector', self.spcepl_head, 'type', 'node', 'layer', 1) 115 self.mask_eplactive_node = project3d(md, 'vector', self.mask_eplactive_node, 'type', 'node', 'layer', 1) 116 return self 117 #}}} 118 119 def setdefaultparameters(self): #{{{ 120 #Parameters from de Fleurian 2014 121 self.water_compressibility = 5.04e-10 122 self.isefficientlayer = 1 123 self.penalty_factor = 3 124 
self.penalty_lock = 0 125 self.rel_tol = 1.0e-06 126 self.max_iter = 100 127 self.steps_per_step = 1 128 self.sedimentlimit_flag = 0 129 self.sedimentlimit = 0 130 self.transfer_flag = 0 131 self.unconfined_flag = 0 132 self.leakage_factor = 10.0 133 self.requested_outputs = ['default'] 134 135 self.sediment_compressibility = 1.0e-08 136 self.sediment_porosity = 0.4 137 self.sediment_thickness = 20.0 138 self.sediment_transmitivity = 8.0e-04 139 140 self.epl_compressibility = 1.0e-08 141 self.epl_conductivity = 8.0e-02 142 self.epl_porosity = 0.4 143 self.epl_initial_thickness = 1.0 144 self.epl_colapse_thickness = self.sediment_transmitivity / self.epl_conductivity 145 self.epl_thick_comp = 1 146 self.epl_max_thickness = 5.0 147 self.eplflip_lock = 0 148 149 return self 150 # }}} 151 152 def defaultoutputs(self, md): # {{{ 153 list = ['SedimentHeadHydrostep', 'SedimentHeadResidual', 'EffectivePressureHydrostep'] 154 if self.isefficientlayer == 1: 155 list.extend(['EplHeadHydrostep', 'HydrologydcMaskEplactiveNode', 'HydrologydcMaskEplactiveElt', 'EplHeadSlopeX', 'EplHeadSlopeY', 'HydrologydcEplThicknessHydrostep']) 156 if self.steps_per_step > 1: 157 list.extend(['EffectivePressure', 'SedimentHead']) 158 if self.isefficientlayer == 1: 159 list.extend(['EplHead', 'HydrologydcEplThickness']) 160 return list 161 #}}} 162 163 def initialize(self, md): # {{{ 164 self.epl_colapse_thickness = self.sediment_transmitivity / self.epl_conductivity 165 if np.all(np.isnan(self.basal_moulin_input)): 166 self.basal_moulin_input = np.zeros((md.mesh.numberofvertices)) 167 print(" no hydrology.basal_moulin_input specified: values set as zero") 168 169 return self 170 # }}} 171 172 def checkconsistency(self, md, solution, analyses): #{{{ 173 174 #Early return 175 if 'HydrologyDCInefficientAnalysis' not in analyses and 'HydrologyDCEfficientAnalysis' not in analyses: 176 return md 177 178 md = checkfield(md, 'fieldname', 'hydrology.water_compressibility', 'numel', [1], '>', 0.) 
179 md = checkfield(md, 'fieldname', 'hydrology.isefficientlayer', 'numel', [1], 'values', [0, 1]) 180 md = checkfield(md, 'fieldname', 'hydrology.penalty_factor', '>', 0., 'numel', [1]) 181 md = checkfield(md, 'fieldname', 'hydrology.penalty_lock', '>=', 0., 'numel', [1]) 182 md = checkfield(md, 'fieldname', 'hydrology.rel_tol', '>', 0., 'numel', [1]) 183 md = checkfield(md, 'fieldname', 'hydrology.max_iter', '>', 0., 'numel', [1]) 184 md = checkfield(md, 'fieldname', 'hydrology.steps_per_step', '>', 0., 'numel', [1]) 185 md = checkfield(md, 'fieldname', 'hydrology.sedimentlimit_flag', 'numel', [1], 'values', [0, 1, 2, 3]) 186 md = checkfield(md, 'fieldname', 'hydrology.transfer_flag', 'numel', [1], 'values', [0, 1]) 187 md = checkfield(md, 'fieldname', 'hydrology.unconfined_flag', 'numel', [1], 'values', [0, 1]) 188 md = checkfield(md, 'fieldname', 'hydrology.requested_outputs', 'stringrow', 1) 189 190 if self.sedimentlimit_flag == 1: 191 md = checkfield(md, 'fieldname', 'hydrology.sedimentlimit', '>', 0., 'numel', [1]) 192 193 if self.transfer_flag == 1: 194 md = checkfield(md, 'fieldname', 'hydrology.leakage_factor', '>', 0., 'numel', [1]) 195 196 md = checkfield(md, 'fieldname', 'hydrology.basal_moulin_input', 'NaN', 1, 'Inf', 1, 'timeseries', 1) 197 md = checkfield(md, 'fieldname', 'hydrology.spcsediment_head', 'Inf', 1, 'timeseries', 1) 198 md = checkfield(md, 'fieldname', 'hydrology.sediment_compressibility', '>', 0., 'numel', [1]) 199 md = checkfield(md, 'fieldname', 'hydrology.sediment_porosity', '>', 0., 'numel', [1]) 200 md = checkfield(md, 'fieldname', 'hydrology.sediment_thickness', '>', 0., 'numel', [1]) 201 md = checkfield(md, 'fieldname', 'hydrology.sediment_transmitivity', '>=', 0, 'size', [md.mesh.numberofvertices]) 202 md = checkfield(md, 'fieldname', 'hydrology.mask_thawed_node', 'size', [md.mesh.numberofvertices], 'values', [0, 1]) 203 if self.isefficientlayer == 1: 204 md = checkfield(md, 'fieldname', 'hydrology.spcepl_head', 'Inf', 1, 
'timeseries', 1) 205 md = checkfield(md, 'fieldname', 'hydrology.mask_eplactive_node', 'size', [md.mesh.numberofvertices], 'values', [0, 1]) 206 md = checkfield(md, 'fieldname', 'hydrology.epl_compressibility', '>', 0., 'numel', [1]) 207 md = checkfield(md, 'fieldname', 'hydrology.epl_porosity', '>', 0., 'numel', [1]) 208 md = checkfield(md, 'fieldname', 'hydrology.epl_max_thickness', 'numel', [1], '>', 0.) 209 md = checkfield(md, 'fieldname', 'hydrology.epl_initial_thickness', 'numel', [1], '>', 0.) 210 md = checkfield(md, 'fieldname', 'hydrology.epl_colapse_thickness', 'numel', [1], '>', 0.) 211 md = checkfield(md, 'fieldname', 'hydrology.epl_thick_comp', 'numel', [1], 'values', [0, 1]) 212 md = checkfield(md, 'fieldname', 'hydrology.eplflip_lock', '>=', 0., 'numel', [1]) 213 if self.epl_colapse_thickness > self.epl_initial_thickness: 214 md.checkmessage('Colapsing thickness for EPL larger than initial thickness') 215 md = checkfield(md, 'fieldname', 'hydrology.epl_conductivity', 'numel', [1], '>', 0.) 
216 # }}} 217 218 def marshall(self, prefix, md, fid): #{{{ 219 WriteData(fid, prefix, 'name', 'md.hydrology.model', 'data', 1, 'format', 'Integer') 220 WriteData(fid, prefix, 'object', self, 'fieldname', 'water_compressibility', 'format', 'Double') 221 WriteData(fid, prefix, 'object', self, 'fieldname', 'isefficientlayer', 'format', 'Boolean') 222 WriteData(fid, prefix, 'object', self, 'fieldname', 'penalty_factor', 'format', 'Double') 223 WriteData(fid, prefix, 'object', self, 'fieldname', 'penalty_lock', 'format', 'Integer') 224 WriteData(fid, prefix, 'object', self, 'fieldname', 'rel_tol', 'format', 'Double') 225 WriteData(fid, prefix, 'object', self, 'fieldname', 'max_iter', 'format', 'Integer') 226 WriteData(fid, prefix, 'object', self, 'fieldname', 'steps_per_step', 'format', 'Integer') 227 WriteData(fid, prefix, 'object', self, 'fieldname', 'sedimentlimit_flag', 'format', 'Integer') 228 WriteData(fid, prefix, 'object', self, 'fieldname', 'transfer_flag', 'format', 'Integer') 229 WriteData(fid, prefix, 'object', self, 'fieldname', 'unconfined_flag', 'format', 'Integer') 230 if self.sedimentlimit_flag == 1: 231 WriteData(fid, prefix, 'object', self, 'fieldname', 'sedimentlimit', 'format', 'Double') 232 233 if self.transfer_flag == 1: 234 WriteData(fid, prefix, 'object', self, 'fieldname', 'leakage_factor', 'format', 'Double') 235 236 WriteData(fid, prefix, 'object', self, 'fieldname', 'basal_moulin_input', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', md.constants.yts) 237 WriteData(fid, prefix, 'object', self, 'fieldname', 'spcsediment_head', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', md.constants.yts) 238 WriteData(fid, prefix, 'object', self, 'fieldname', 'sediment_compressibility', 'format', 'Double') 239 WriteData(fid, prefix, 'object', self, 'fieldname', 'sediment_porosity', 'format', 'Double') 240 WriteData(fid, prefix, 'object', self, 'fieldname', 
'sediment_thickness', 'format', 'Double') 241 WriteData(fid, prefix, 'object', self, 'fieldname', 'sediment_transmitivity', 'format', 'DoubleMat', 'mattype', 1) 242 WriteData(fid, prefix, 'object', self, 'fieldname', 'mask_thawed_node', 'format', 'DoubleMat', 'mattype', 1) 243 244 if self.isefficientlayer == 1: 245 WriteData(fid, prefix, 'object', self, 'fieldname', 'spcepl_head', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', md.constants.yts) 246 WriteData(fid, prefix, 'object', self, 'fieldname', 'mask_eplactive_node', 'format', 'DoubleMat', 'mattype', 1) 247 WriteData(fid, prefix, 'object', self, 'fieldname', 'epl_compressibility', 'format', 'Double') 248 WriteData(fid, prefix, 'object', self, 'fieldname', 'epl_porosity', 'format', 'Double') 249 WriteData(fid, prefix, 'object', self, 'fieldname', 'epl_max_thickness', 'format', 'Double') 250 WriteData(fid, prefix, 'object', self, 'fieldname', 'epl_initial_thickness', 'format', 'Double') 251 WriteData(fid, prefix, 'object', self, 'fieldname', 'epl_colapse_thickness', 'format', 'Double') 252 WriteData(fid, prefix, 'object', self, 'fieldname', 'epl_thick_comp', 'format', 'Integer') 253 WriteData(fid, prefix, 'object', self, 'fieldname', 'epl_conductivity', 'format', 'Double') 254 WriteData(fid, prefix, 'object', self, 'fieldname', 'eplflip_lock', 'format', 'Integer') 255 256 #process requested outputs 257 outputs = self.requested_outputs 258 indices = [i for i, x in enumerate(outputs) if x == 'default'] 259 if len(indices) > 0: 260 outputscopy = outputs[0:max(0, indices[0] - 1)] + self.defaultoutputs(md) + outputs[indices[0] + 1:] 261 outputs = outputscopy 262 WriteData(fid, prefix, 'data', outputs, 'name', 'md.hydrology.requested_outputs', 'format', 'StringArray') 263 # }}} -
issm/trunk-jpl/src/m/classes/model.py
r23788 r23870 753 753 if md.inversion.max_parameters.size > 1: 754 754 md.inversion.max_parameters = project2d(md, md.inversion.max_parameters, md.mesh.numberoflayers) 755 if not np.isnan(md.smb.mass_balance).all(): 756 md.smb.mass_balance = project2d(md, md.smb.mass_balance, md.mesh.numberoflayers) 755 if hasattr(md.smb, 'mass_balance'): 756 if not np.isnan(md.smb.mass_balance).all(): 757 md.smb.mass_balance = project2d(md, md.smb.mass_balance, md.mesh.numberoflayers) 757 758 758 759 #results -
issm/trunk-jpl/src/m/consistency/ismodelselfconsistent.py
def AnalysisConfiguration(solutiontype):  #{{{
    """
    ANALYSISCONFIGURATION - return the list of analyses required by a solution type

    Usage:
        analyses = AnalysisConfiguration(solutiontype)

    Raises:
        TypeError if the solution type is not supported.
    """
    #Map each solution type onto the analyses it requires.
    configurations = {
        'StressbalanceSolution': ['StressbalanceAnalysis', 'StressbalanceVerticalAnalysis', 'StressbalanceSIAAnalysis', 'L2ProjectionBaseAnalysis'],
        'SteadystateSolution': ['StressbalanceAnalysis', 'StressbalanceVerticalAnalysis', 'StressbalanceSIAAnalysis', 'L2ProjectionBaseAnalysis', 'ThermalAnalysis', 'MeltingAnalysis', 'EnthalpyAnalysis'],
        'ThermalSolution': ['EnthalpyAnalysis', 'ThermalAnalysis', 'MeltingAnalysis'],
        'MasstransportSolution': ['MasstransportAnalysis'],
        'BalancethicknessSolution': ['BalancethicknessAnalysis'],
        'SurfaceSlopeSolution': ['L2ProjectionBaseAnalysis'],
        'BalancevelocitySolution': ['BalancevelocityAnalysis'],
        'BedSlopeSolution': ['L2ProjectionBaseAnalysis'],
        'GiaSolution': ['GiaIvinsAnalysis'],
        'LoveSolution': ['LoveAnalysis'],
        'TransientSolution': ['StressbalanceAnalysis', 'StressbalanceVerticalAnalysis', 'StressbalanceSIAAnalysis', 'L2ProjectionBaseAnalysis', 'ThermalAnalysis', 'MeltingAnalysis', 'EnthalpyAnalysis', 'MasstransportAnalysis'],
        'HydrologySolution': ['L2ProjectionBaseAnalysis', 'HydrologyShreveAnalysis', 'HydrologyDCInefficientAnalysis', 'HydrologyDCEfficientAnalysis'],
        'DamageEvolutionSolution': ['DamageEvolutionAnalysis'],
    }

    try:
        return configurations[solutiontype]
    except KeyError:
        #BUGFIX: the previous elif chain ended with "elif 'DamageEvolutionSolution':",
        #testing a bare (always truthy) string, so any unsupported solution type
        #silently received the damage analyses instead of raising this error.
        raise TypeError("solution type: '%s' not supported yet!" % solutiontype)
    #}}}


def ismodelselfconsistent(md):
    """
    ISMODELSELFCONSISTENT - check that model forms a closed form solvable problem.

    Walks every model property (except purely informational ones) and invokes its
    checkconsistency(md, solution, analyses) method. A failing check is expected
    to record a message through md.checkmessage().
    NOTE(review): checkmessage presumably sets md.private.isconsistent to False -
    confirm against the model class.

    Usage:
        ismodelselfconsistent(md)

    Raises:
        RuntimeError if any consistency check failed.
    """
    #initialize consistency as true
    md.private.isconsistent = True

    #Get solution and associated analyses
    solution = md.private.solution
    analyses = AnalysisConfiguration(solution)

    #Go through the model fields, check that each one is an object, and call checkconsistency
    for field in md.properties():
        #Some properties do not need to be checked
        if field in ['results', 'debug', 'radaroverlay']:
            continue

        #Check that the current field is an object exposing a consistency check
        obj = getattr(md, field)
        if not hasattr(obj, 'checkconsistency'):
            md.checkmessage("field '%s' is not an object." % field)
            #BUGFIX: skip the call instead of raising AttributeError right after
            #having recorded the error message
            continue

        #Check consistency of the object (direct call instead of exec on a
        #formatted string - same effect, no dynamic code evaluation)
        obj.checkconsistency(md, solution, analyses)

    #error message if model is not consistent
    if not md.private.isconsistent:
        raise RuntimeError('Model not consistent, see messages above.')
issm/trunk-jpl/src/m/contrib/defleurian/netCDF/export_netCDF.py
r23772 r23870 9 9 if path.exists(filename): 10 10 print('File {} allready exist'.format(filename)) 11 newname = eval(input('Give a new name or "delete" to replace: '))11 newname = input('Give a new name or "delete" to replace: ') 12 12 if newname == 'delete': 13 13 remove(filename) … … 27 27 dimindex = 1 28 28 dimlist = [2, md.mesh.numberofelements, md.mesh.numberofvertices, np.shape(md.mesh.elements)[1]] 29 print('===Creating dimensions===') 29 30 for i in range(0, 4): 30 31 if dimlist[i] not in list(DimDict.keys()): 31 32 dimindex += 1 32 NewDim = NCData.createDimension('DimNum' +str(dimindex), dimlist[i])33 NewDim = NCData.createDimension('DimNum' + str(dimindex), dimlist[i]) 33 34 DimDict[len(NewDim)] = 'DimNum' + str(dimindex) 34 35 typelist = [bool, str, str, int, float, complex, … … 37 38 groups = dict.keys(md.__dict__) 38 39 # get all model classes and create respective groups 40 print('===Creating and populating groups===') 39 41 for group in groups: 40 42 NCgroup = NCData.createGroup(str(group)) … … 65 67 Listgroup = Subgroup.createGroup(str(md.__dict__[group].__dict__[field].__getitem__(listindex).__dict__[naming])) 66 68 except AttributeError: 67 Listgroup =Subgroup.createGroup(str(md.__dict__[group].__dict__[field].__class__.__name__)+str(listindex))68 Listgroup.__setattr__('classtype', md.__dict__[group].__dict__[field].__getitem__(listindex).__class__.__name__)69 Listgroup = Subgroup.createGroup(str(md.__dict__[group].__dict__[field].__class__.__name__) + str(listindex)) 70 Listgroup.__setattr__('classtype', md.__dict__[group].__dict__[field].__getitem__(listindex).__class__.__name__) 69 71 try: 70 subfields =dict.keys(md.__dict__[group].__dict__[field].__getitem__(listindex).__dict__)72 subfields = dict.keys(md.__dict__[group].__dict__[field].__getitem__(listindex).__dict__) 71 73 except AttributeError: 72 subfields =dict.keys(md.__dict__[group].__dict__[field].__getitem__(listindex))74 subfields = 
dict.keys(md.__dict__[group].__dict__[field].__getitem__(listindex)) 73 75 for subfield in subfields: 74 if subfield !='outlog':76 if subfield != 'outlog': 75 77 try: 76 Var =md.__dict__[group].__dict__[field].__getitem__(listindex).__dict__[subfield]78 Var = md.__dict__[group].__dict__[field].__getitem__(listindex).__dict__[subfield] 77 79 except AttributeError: 78 80 Var = md.__dict__[group].__dict__[field].__getitem__(listindex)[subfield] 79 DimDict = CreateVar(NCData, Var,subfield,Listgroup,DimDict,md.__dict__[group],field,listindex)81 DimDict = CreateVar(NCData, Var, subfield, Listgroup, DimDict, md.__dict__[group], field, listindex) 80 82 # No subgroup, we directly treat the variable 81 83 elif type(md.__dict__[group].__dict__[field]) in typelist or field == 'bamg': … … 90 92 NCgroup.__setattr__('classtype', md.__dict__[group].__class__.__name__) 91 93 Var = md.__dict__[group].__dict__[field].data 92 DimDict = CreateVar(NCData, Var,field,NCgroup,DimDict)94 DimDict = CreateVar(NCData, Var, field, NCgroup, DimDict) 93 95 else: 94 96 NCgroup.__setattr__('classtype', str(group)) … … 186 188 except KeyError: 187 189 index = len(DimDict) + 1 # if the dimension does not exist, increment naming 188 NewDim = NCData.createDimension('DimNum' +str(index), val_shape) # create dimension190 NewDim = NCData.createDimension('DimNum' + str(index), val_shape) # create dimension 189 191 DimDict[len(NewDim)] = 'DimNum' + str(index) # and update the dimension dictionary 190 192 output = [str(DimDict[val_shape])] + [DimDict[2]] # now proceed with the shape of the value -
issm/trunk-jpl/src/m/contrib/defleurian/paraview/exportVTK.py
import numpy as np
from os import path, remove, mkdir
from glob import glob


def exportVTK(filename, md, *args, enveloppe=False):
    '''
    vtk export
    function exportVTK(filename,md)
    creates a directory with the vtk files for displays in paraview
    (only work for triangle and wedges based on their number of nodes)

    Usage:
        exportVTK('DirName',md)
        exportVTK('DirName',md,'geometry','mesh')
        exportVTK('DirName',md,'geometry','mesh',enveloppe=True)

    DirName is the name of the output directory, each timestep then has it
    own file ('Timestep.vtkX.vtk') with X the number of the output step
    enveloppe is an option keeping only the enveloppe of the md (it is False by default)

    TODO: - make time easily accessible
          - make evolving geometry

    Basile de Fleurian:
    '''

    # File checking and creation {{{
    if path.exists(filename):
        print(('File {} allready exist'.format(filename)))
        newname = input('Give a new name or "delete" to replace: ')
        if newname == 'delete':
            #keep the directory, just empty it
            filelist = glob(filename + '/*')
            for oldfile in filelist:
                remove(oldfile)
        else:
            print(('New file name is {}'.format(newname)))
            filename = newname
            mkdir(filename)
    else:
        mkdir(filename)
    # }}}

    # this is the result structure {{{
    print('Getting accessorie variables')
    res_struct = md.results
    moving_mesh = False
    if(type(res_struct) != list):
        #Getting all the solutions of the md
        solnames = dict.keys(res_struct.__dict__)
        num_of_timesteps = 1
        #building solutionstructure
        #NOTE(review): the nesting below was reconstructed from a flattened
        #listing; a multi-step solution is assumed to be a list of result
        #objects whose first entry may carry a 'Surface' field - confirm.
        for solution in solnames:
            #looking for multiple time steps
            if (np.size(res_struct.__dict__[solution]) > num_of_timesteps):
                num_of_timesteps = int(np.size(res_struct.__dict__[solution]))
                if 'Surface' in dict.keys(res_struct.__dict__[solution][0].__dict__):
                    moving_mesh = True
    else:
        num_of_timesteps = 1
    # }}}

    # get the element related variables {{{
    print('Now treating the mesh')
    #first get the general things
    dim = int(md.mesh.domaintype()[0])
    every_nodes = md.mesh.numberofvertices
    every_cells = md.mesh.numberofelements

    if np.shape(md.mesh.elements)[1] == 3 or enveloppe:
        point_per_elt = 3
        celltype = 5  #triangles
    elif np.shape(md.mesh.elements)[1] == 6:
        point_per_elt = 6
        celltype = 13  #wedges
    else:
        raise BadDimension('exportVTK does not support your element type')

    if enveloppe:
        if dim == 3:
            #keep only base and surface vertices, and build the conversion
            #table from global vertex indices to enveloppe-local indices
            is_enveloppe = np.logical_or(md.mesh.vertexonbase, md.mesh.vertexonsurface)
            enveloppe_index = np.where(is_enveloppe)[0]
            convert_index = np.nan * np.ones(np.shape(md.mesh.x))
            convert_index = np.asarray([[i, np.where(enveloppe_index == i)[0][0]] for i, val in enumerate(convert_index) if any(enveloppe_index == i)])
            points = np.column_stack((md.mesh.x[enveloppe_index],
                                      md.mesh.y[enveloppe_index],
                                      md.mesh.z[enveloppe_index]))

            #elements with no neighbour above or below belong to the enveloppe
            num_of_elt = np.size(np.where(np.isnan(md.mesh.lowerelements))) + np.size(np.where(np.isnan(md.mesh.upperelements)))
            connect = md.mesh.elements[np.where(is_enveloppe[md.mesh.elements - 1])].reshape(int(num_of_elt), 3) - 1
            for elt in range(0, num_of_elt):
                connect[elt, 0] = convert_index[np.where(convert_index == connect[elt, 0])[0], 1][0]
                connect[elt, 1] = convert_index[np.where(convert_index == connect[elt, 1])[0], 1][0]
                connect[elt, 2] = convert_index[np.where(convert_index == connect[elt, 2])[0], 1][0]

            num_of_points = np.size(enveloppe_index)
        else:
            raise BadDimension("exportVTK can't get an enveloppe for dimension {}".format(dim))

    else:
        #we get all the mesh, mainly defining dummies
        num_of_elt = every_cells
        connect = md.mesh.elements - 1
        enveloppe_index = np.arange(0, np.size(md.mesh.x))
        num_of_points = every_nodes
        if dim == 2:
            points = np.column_stack((md.mesh.x, md.mesh.y, md.geometry.surface))
        elif dim == 3:
            points = np.column_stack((md.mesh.x, md.mesh.y, md.mesh.z))
        else:
            raise BadDimension('exportVTK does not support dimension {}'.format(dim))
    # }}}

    # write header and mesh {{{
    print('Now starting to write stuff')
    for step in range(0, num_of_timesteps):
        print('Writing for step {}'.format(step))
        saved_cells = {}
        timestep = step
        fid = open((filename + '/Timestep.vtk' + str(timestep) + '.vtk'), 'w+')
        fid.write('# vtk DataFile Version 3.0 \n')
        fid.write('Data for run {} \n'.format(md.miscellaneous.name))
        fid.write('ASCII \n')
        fid.write('DATASET UNSTRUCTURED_GRID \n')
        fid.write('POINTS {:d} float\n'.format(num_of_points))
        #updating z for mesh evolution
        if moving_mesh:
            base = np.squeeze(res_struct.__dict__['TransientSolution'][step].__dict__['Base'][enveloppe_index])
            thick_change_ratio = (np.squeeze(res_struct.__dict__['TransientSolution'][step].__dict__['Thickness'][enveloppe_index]) / md.geometry.thickness[enveloppe_index])
            above_bed = points[:, 2] - md.geometry.base[enveloppe_index]
            altitude = base + thick_change_ratio * above_bed
        else:
            altitude = points[:, 2]
        for index, point in enumerate(points):
            fid.write('{:f} {:f} {:f} \n'.format(point[0], point[1], altitude[index]))

        fid.write('CELLS {:d} {:d}\n'.format(num_of_elt, num_of_elt * (point_per_elt + 1)))

        for elt in range(0, num_of_elt):
            if celltype == 5:
                fid.write('3 {:d} {:d} {:d}\n'.format(connect[elt, 0],
                                                      connect[elt, 1],
                                                      connect[elt, 2]))
            elif celltype == 13:
                fid.write('6 {:d} {:d} {:d} {:d} {:d} {:d}\n'.format(connect[elt, 0],
                                                                     connect[elt, 1],
                                                                     connect[elt, 2],
                                                                     connect[elt, 3],
                                                                     connect[elt, 4],
                                                                     connect[elt, 5]))

        fid.write('CELL_TYPES {:d}\n'.format(num_of_elt))
        for elt in range(0, num_of_elt):
            fid.write('{:d}\n'.format(celltype))

        fid.write('POINT_DATA {:s} \n'.format(str(num_of_points)))
        # }}}
        # loop over the different solution structures{{{
        # first check if there are solutions to grab
        if 'solnames' in locals():
            for sol in solnames:
                treated_res = []
                #dealing with results on different timesteps
                if(np.size(res_struct.__dict__[sol]) > timestep):
                    timestep = step
                else:
                    #BUGFIX: clamp to the last valid index; the original used
                    #np.size(...) itself, which is one past the end of the list
                    timestep = np.size(res_struct.__dict__[sol]) - 1

                #getting the fields in the solution
                if(type(res_struct.__dict__[sol]) == list):
                    spe_res_struct = res_struct.__dict__[sol].__getitem__(timestep)
                else:
                    spe_res_struct = res_struct.__dict__[sol]
                fieldnames = dict.keys(spe_res_struct.__dict__)

                #Sorting scalars, vectors and tensors
                tensors = [field for field in fieldnames if field[-2:] in ['xx', 'yy', 'xy', 'zz', 'xz', 'yz']]
                non_tensor = [field for field in fieldnames if field not in tensors]
                vectors = [field for field in non_tensor if field[-1] in ['x', 'y', 'z']]

                #check which field is a real result and print
                for field in fieldnames:
                    if field in treated_res:
                        continue
                    if field in vectors:
                        #gather every component of the vector; if one is
                        #missing, fall back to scalar treatment below
                        try:
                            Vxstruct = np.squeeze(spe_res_struct.__dict__[field[:-1] + 'x'])
                            Vystruct = np.squeeze(spe_res_struct.__dict__[field[:-1] + 'y'])
                            if dim == 3:
                                Vzstruct = np.squeeze(spe_res_struct.__dict__[field[:-1] + 'z'])
                                treated_res += [field[:-1] + 'x', field[:-1] + 'y', field[:-1] + 'z']
                            elif dim == 2:
                                treated_res += [field[:-1] + 'x', field[:-1] + 'y']
                        except KeyError:
                            #BUGFIX: the original did "fieldnames += field" here
                            #(a TypeError on a dict_keys view, and mutation of
                            #the iterated sequence otherwise) and then wrote the
                            #VECTORS record with undefined components; instead,
                            #demote the field to a scalar and fall through.
                            vectors.remove(field)
                        else:
                            fid.write('VECTORS {} float \n'.format(field[:-1]))
                            for node in range(0, num_of_points):
                                Vx = cleanOutliers(Vxstruct[enveloppe_index[node]])
                                Vy = cleanOutliers(Vystruct[enveloppe_index[node]])
                                if dim == 3:
                                    Vz = cleanOutliers(Vzstruct[enveloppe_index[node]])
                                    fid.write('{:f} {:f} {:f}\n'.format(Vx, Vy, Vz))
                                elif dim == 2:
                                    fid.write('{:f} {:f} {:f}\n'.format(Vx, Vy, 0))
                            continue
                    if field in tensors:
                        #TODO: tensor output is not implemented yet
                        print("nothing")
                    else:
                        if ((np.size(spe_res_struct.__dict__[field])) == every_nodes):
                            fid.write('SCALARS {} float 1 \n'.format(field))
                            fid.write('LOOKUP_TABLE default\n')
                            for node in range(0, num_of_points):
                                outval = cleanOutliers(np.squeeze(spe_res_struct.__dict__[field][enveloppe_index[node]]))
                                fid.write('{:f}\n'.format(outval))
                        elif ((np.size(spe_res_struct.__dict__[field])) == every_cells):
                            #cell data has to be written after all point data
                            saved_cells[field] = np.squeeze(spe_res_struct.__dict__[field])
        # }}}
        # loop on arguments, if something other than result is asked, do it now {{{
        for other in args:
            other_struct = md.__dict__[other]
            othernames = (dict.keys(other_struct.__dict__))
            for field in othernames:
                if (np.size(other_struct.__dict__[field]) == every_nodes):
                    fid.write('SCALARS {} float 1 \n'.format(field))
                    fid.write('LOOKUP_TABLE default\n')
                    for node in range(0, num_of_points):
                        outval = cleanOutliers(other_struct.__dict__[field][enveloppe_index[node]])
                        fid.write('{:f}\n'.format(outval))
                elif (np.size(other_struct.__dict__[field]) == every_cells):
                    saved_cells[field] = other_struct.__dict__[field]
        # }}}
        # Now writting cell variables {{{
        if np.size(list(saved_cells.keys())) > 0:
            fid.write('CELL_DATA {:d} \n'.format(num_of_elt))
            for key in list(saved_cells.keys()):
                fid.write('SCALARS {} float 1 \n'.format(key))
                fid.write('LOOKUP_TABLE default\n')
                for cell in range(0, num_of_elt):
                    outval = cleanOutliers(saved_cells[key][cell])
                    fid.write('{:f}\n'.format(outval))
        # }}}
        fid.close()


def cleanOutliers(Val):
    """Return Val made safe for an ASCII VTK file: NaN -> -9999.999 and
    near-zero values (|Val| < 1e-20) -> 0.0, anything else unchanged."""
    #paraview does not like NaN, replacing
    if np.isnan(Val):
        CleanVal = -9999.999
    #also checking for very small value that mess up
    elif (abs(Val) < 1.0e-20):
        CleanVal = 0.0
    else:
        CleanVal = Val
    return CleanVal


class BadDimension(Exception):
    """The required dimension is not supported yet."""
issm/trunk-jpl/src/m/extrusion/project3d.py
r23716 r23870 3 3 4 4 def project3d(md,*args): 5 6 5 """ 6 PROJECT3D - vertically project a vector from 2d mesh 7 7 8 9 This vector can be a node vector of size (md.mesh.numberofvertices2d,N/A) or an 10 element vector of size (md.mesh.numberofelements2d,N/A). 11 arguments: 12 13 'type': 'element' or 'node'. 14 options: 15 'layer' a layer number where vector should keep its values. If not specified, all layers adopt the 16 17 8 vertically project a vector from 2d mesh (split in noncoll and coll areas) into a 3d mesh. 9 This vector can be a node vector of size (md.mesh.numberofvertices2d,N/A) or an 10 element vector of size (md.mesh.numberofelements2d,N/A). 11 arguments: 12 'vector': 2d vector 13 'type': 'element' or 'node'. 14 options: 15 'layer' a layer number where vector should keep its values. If not specified, all layers adopt the 16 value of the 2d vector. 17 'padding': default to 0 (value adopted by other 3d layers not being projected 18 18 19 20 21 22 23 19 Examples: 20 extruded_vector=project3d(md,'vector',vector2d,'type','node','layer',1,'padding',NaN) 21 extruded_vector=project3d(md,'vector',vector2d,'type','element','padding',0) 22 extruded_vector=project3d(md,'vector',vector2d,'type','node') 23 """ 24 24 25 26 27 28 29 25 #some regular checks 26 if not md: 27 raise TypeError("bad usage") 28 if md.mesh.domaintype().lower() != '3d': 29 raise TypeError("input model is not 3d") 30 30 31 32 options= pairoptions(*args)33 vector2d= options.getfieldvalue('vector') #mandatory34 vectype= options.getfieldvalue('type') #mandatory35 layer = options.getfieldvalue('layer',0) #optional (do all layers otherwise)36 paddingvalue = options.getfieldvalue('padding',0) #0 by default31 #retrieve parameters from options. 
32 options = pairoptions(*args) 33 vector2d = options.getfieldvalue('vector') #mandatory 34 vectype = options.getfieldvalue('type') #mandatory 35 layer = options.getfieldvalue('layer', 0) #optional (do all layers otherwise) 36 paddingvalue = options.getfieldvalue('padding', 0) #0 by default 37 37 38 vector1d=False39 if isinstance(vector2d,np.ndarray) and np.ndim(vector2d)==1:40 vector1d=True41 vector2d=vector2d.reshape(-1,)38 vector1d = False 39 if isinstance(vector2d, np.ndarray) and np.ndim(vector2d) == 1: 40 vector1d = True 41 vector2d = vector2d.reshape(-1,) 42 42 43 if isinstance(vector2d,(bool,int,float)) or np.size(vector2d)==1:44 projected_vector=vector2d43 if isinstance(vector2d, (bool, int, float)) or np.size(vector2d) == 1: 44 projected_vector = vector2d 45 45 46 elif vectype.lower()=='node': 46 elif vectype.lower() == 'node': 47 #Initialize 3d vector 48 if np.ndim(vector2d) == 1: 49 if vector2d.shape[0] == md.mesh.numberofvertices2d: 50 projected_vector = (paddingvalue * np.ones((md.mesh.numberofvertices))).astype(vector2d.dtype) 51 elif vector2d.shape[0] == md.mesh.numberofvertices2d + 1: 52 projected_vector = (paddingvalue * np.ones((md.mesh.numberofvertices + 1))).astype(vector2d.dtype) 53 projected_vector[-1] = vector2d[-1] 54 vector2d = vector2d[:-1] 55 else: 56 raise TypeError("vector length not supported") 57 #Fill in 58 if layer == 0: 59 for i in range(md.mesh.numberoflayers): 60 projected_vector[(i * md.mesh.numberofvertices2d):((i + 1) * md.mesh.numberofvertices2d)] = vector2d 61 else: 62 projected_vector[((layer - 1) * md.mesh.numberofvertices2d):(layer * md.mesh.numberofvertices2d)] = vector2d 63 else: 64 if vector2d.shape[0] == md.mesh.numberofvertices2d: 65 projected_vector = (paddingvalue * np.ones((md.mesh.numberofvertices, np.size(vector2d, axis=1)))).astype(vector2d.dtype) 66 elif vector2d.shape[0] == md.mesh.numberofvertices2d + 1: 67 projected_vector = (paddingvalue * np.ones((md.mesh.numberofvertices + 1, np.size(vector2d, 
axis=1)))).astype(vector2d.dtype) 68 projected_vector[-1, :] = vector2d[-1, :] 69 vector2d = vector2d[:-1, :] 70 else: 71 raise TypeError("vector length not supported") 72 #Fill in 73 if layer == 0: 74 for i in range(md.mesh.numberoflayers): 75 projected_vector[(i * md.mesh.numberofvertices2d):((i + 1) * md.mesh.numberofvertices2d), :] = vector2d 76 else: 77 projected_vector[((layer - 1) * md.mesh.numberofvertices2d):(layer * md.mesh.numberofvertices2d), :] = vector2d 47 78 48 #Initialize 3d vector 49 if np.ndim(vector2d)==1: 50 if vector2d.shape[0]==md.mesh.numberofvertices2d: 51 projected_vector=(paddingvalue*np.ones((md.mesh.numberofvertices))).astype(vector2d.dtype) 52 elif vector2d.shape[0]==md.mesh.numberofvertices2d+1: 53 projected_vector=(paddingvalue*np.ones((md.mesh.numberofvertices+1))).astype(vector2d.dtype) 54 projected_vector[-1]=vector2d[-1] 55 vector2d=vector2d[:-1] 56 else: 57 raise TypeError("vector length not supported") 58 #Fill in 59 if layer==0: 60 for i in range(md.mesh.numberoflayers): 61 projected_vector[(i*md.mesh.numberofvertices2d):((i+1)*md.mesh.numberofvertices2d)]=vector2d 62 else: 63 projected_vector[((layer-1)*md.mesh.numberofvertices2d):(layer*md.mesh.numberofvertices2d)]=vector2d 64 else: 65 if vector2d.shape[0]==md.mesh.numberofvertices2d: 66 projected_vector=(paddingvalue*np.ones((md.mesh.numberofvertices,np.size(vector2d,axis=1)))).astype(vector2d.dtype) 67 elif vector2d.shape[0]==md.mesh.numberofvertices2d+1: 68 projected_vector=(paddingvalue*np.ones((md.mesh.numberofvertices+1,np.size(vector2d,axis=1)))).astype(vector2d.dtype) 69 projected_vector[-1,:]=vector2d[-1,:] 70 vector2d=vector2d[:-1,:] 71 else: 72 raise TypeError("vector length not supported") 73 #Fill in 74 if layer==0: 75 for i in range(md.mesh.numberoflayers): 76 projected_vector[(i*md.mesh.numberofvertices2d):((i+1)*md.mesh.numberofvertices2d),:]=vector2d 77 else: 78 
projected_vector[((layer-1)*md.mesh.numberofvertices2d):(layer*md.mesh.numberofvertices2d),:]=vector2d 79 elif vectype.lower() == 'element': 80 #Initialize 3d vector 81 if np.ndim(vector2d) == 1: 82 if vector2d.shape[0] == md.mesh.numberofelements2d: 83 projected_vector = (paddingvalue * np.ones((md.mesh.numberofelements))).astype(vector2d.dtype) 84 elif vector2d.shape[0] == md.mesh.numberofelements2d + 1: 85 projected_vector = (paddingvalue * np.ones((md.mesh.numberofelements + 1))).astype(vector2d.dtype) 86 projected_vector[-1] = vector2d[-1] 87 vector2d = vector2d[:-1] 88 else: 89 raise TypeError("vector length not supported") 90 #Fill in 91 if layer == 0: 92 for i in range(md.mesh.numberoflayers - 1): 93 projected_vector[(i * md.mesh.numberofelements2d):((i + 1) * md.mesh.numberofelements2d)] = vector2d 94 else: 95 projected_vector[((layer - 1) * md.mesh.numberofelements2d):(layer * md.mesh.numberofelements2d)] = vector2d 96 else: 97 if vector2d.shape[0] == md.mesh.numberofelements2d: 98 projected_vector = (paddingvalue * np.ones((md.mesh.numberofelements, np.size(vector2d, axis=1)))).astype(vector2d.dtype) 99 elif vector2d.shape[0] == md.mesh.numberofelements2d + 1: 100 projected_vector = (paddingvalue * np.ones((md.mesh.numberofelements + 1, np.size(vector2d, axis=1)))).astype(vector2d.dtype) 101 projected_vector[-1, :] = vector2d[-1, :] 102 vector2d = vector2d[:-1, :] 103 else: 104 raise TypeError("vector length not supported") 105 #Fill in 106 if layer == 0: 107 for i in range(md.mesh.numberoflayers - 1): 108 projected_vector[(i * md.mesh.numberofelements2d):((i + 1) * md.mesh.numberofelements2d), :] = vector2d 109 else: 110 projected_vector[((layer - 1) * md.mesh.numberofelements2d):(layer * md.mesh.numberofelements2d), :] = vector2d 79 111 112 else: 113 raise TypeError("project3d error message: unknown projection type") 80 114 81 elif vectype.lower()=='element': 115 if vector1d: 116 projected_vector = projected_vector.reshape(-1,) 82 117 83 #Initialize 3d 
vector 84 if np.ndim(vector2d)==1: 85 if vector2d.shape[0]==md.mesh.numberofelements2d: 86 projected_vector=(paddingvalue*np.ones((md.mesh.numberofelements))).astype(vector2d.dtype) 87 elif vector2d.shape[0]==md.mesh.numberofelements2d+1: 88 projected_vector=(paddingvalue*np.ones((md.mesh.numberofelements+1))).astype(vector2d.dtype) 89 projected_vector[-1]=vector2d[-1] 90 vector2d=vector2d[:-1] 91 else: 92 raise TypeError("vector length not supported") 93 #Fill in 94 if layer==0: 95 for i in range(md.mesh.numberoflayers-1): 96 projected_vector[(i*md.mesh.numberofelements2d):((i+1)*md.mesh.numberofelements2d)]=vector2d 97 else: 98 projected_vector[((layer-1)*md.mesh.numberofelements2d):(layer*md.mesh.numberofelements2d)]=vector2d 99 else: 100 if vector2d.shape[0]==md.mesh.numberofelements2d: 101 projected_vector=(paddingvalue*np.ones((md.mesh.numberofelements, np.size(vector2d,axis=1)))).astype(vector2d.dtype) 102 elif vector2d.shape[0]==md.mesh.numberofelements2d+1: 103 projected_vector=(paddingvalue*np.ones((md.mesh.numberofelements+1,np.size(vector2d,axis=1)))).astype(vector2d.dtype) 104 projected_vector[-1,:]=vector2d[-1,:] 105 vector2d=vector2d[:-1,:] 106 else: 107 raise TypeError("vector length not supported") 108 #Fill in 109 if layer==0: 110 for i in range(md.mesh.numberoflayers-1): 111 projected_vector[(i*md.mesh.numberofelements2d):((i+1)*md.mesh.numberofelements2d),:]=vector2d 112 else: 113 projected_vector[((layer-1)*md.mesh.numberofelements2d):(layer*md.mesh.numberofelements2d),:]=vector2d 114 115 else: 116 raise TypeError("project3d error message: unknown projection type") 117 118 if vector1d: 119 projected_vector=projected_vector.reshape(-1,) 120 121 return projected_vector 118 return projected_vector -
issm/trunk-jpl/src/m/plot/plotmodel.py
r23716 r23870 1 1 import numpy as np 2 2 from plotoptions import plotoptions 3 from plotdoc import plotdoc4 3 from plot_manager import plot_manager 5 4 from math import ceil, sqrt 6 5 7 6 try: 8 import pylab as p 9 import matplotlib.pyplot as plt 10 from mpl_toolkits.axes_grid1 import ImageGrid, AxesGrid 11 from mpl_toolkits.mplot3d import Axes3D 7 import matplotlib.pyplot as plt 8 from mpl_toolkits.axes_grid1 import ImageGrid 12 9 except ImportError: 13 print("could not import pylab, matplotlib has not been installed, no plotting capabilities enabled") 14 15 def plotmodel(md,*args): 16 ''' at command prompt, type 'plotdoc()' for additional documentation 17 ''' 18 19 #First process options 20 options=plotoptions(*args) 21 22 #get number of subplots 23 subplotwidth=ceil(sqrt(options.numberofplots)) 24 #Get figure number and number of plots 25 figurenumber=options.figurenumber 26 numberofplots=options.numberofplots 27 28 #get hold 29 hold=options.list[0].getfieldvalue('hold',False) 30 31 #if nrows and ncols specified, then bypass 32 if options.list[0].exist('nrows'): 33 nrows=options.list[0].getfieldvalue('nrows') 34 nr=True 35 else: 36 nrows=np.ceil(numberofplots/subplotwidth) 37 nr=False 38 39 if options.list[0].exist('ncols'): 40 ncols=options.list[0].getfieldvalue('ncols') 41 nc=True 42 else: 43 ncols=int(subplotwidth) 44 nc=False 45 ncols=int(ncols) 46 nrows=int(nrows) 47 48 #check that nrows and ncols were given at the same time! 
49 if not nr==nc: 50 raise Exception('error: nrows and ncols need to be specified together, or not at all') 51 52 #Go through plots 53 if numberofplots: 54 #if plt.fignum_exists(figurenumber): 55 # plt.cla() 56 57 #if figsize specified 58 if options.list[0].exist('figsize'): 59 figsize=options.list[0].getfieldvalue('figsize') 60 fig=plt.figure(figurenumber,figsize=(figsize[0],figsize[1]))#,tight_layout=True) 61 else: 62 fig=plt.figure(figurenumber)#,tight_layout=True) 63 fig.clf() 64 65 backgroundcolor=options.list[0].getfieldvalue('backgroundcolor',(0.7,0.7,0.7)) 66 fig.set_facecolor(backgroundcolor) 10 print("could not import pylab, matplotlib has not been installed, no plotting capabilities enabled") 67 11 68 12 69 translator={'on':'each', 70 'off':'None', 71 'one':'single'} 72 # options needed to define plot grid 73 plotnum=options.numberofplots 74 if plotnum==1: 75 plotnum=None 76 direction=options.list[0].getfieldvalue('direction','row') # row,column 77 axes_pad=options.list[0].getfieldvalue('axes_pad',0.25) 78 add_all=options.list[0].getfieldvalue('add_all',True) # True,False 79 share_all=options.list[0].getfieldvalue('share_all',True) # True,False 80 label_mode=options.list[0].getfieldvalue('label_mode','L') # 1,L,all 81 colorbar=options.list[0].getfieldvalue('colorbar','on') # on, off (single) 82 cbar_mode=translator[colorbar] 83 cbar_location=options.list[0].getfieldvalue('colorbarpos','right') # right,top 84 cbar_size=options.list[0].getfieldvalue('colorbarsize','5%') 85 cbar_pad=options.list[0].getfieldvalue('colorbarpad',0.025) # None or % 13 def plotmodel(md, *args): 14 ''' at command prompt, type 'plotdoc()' for additional documentation 15 ''' 86 16 87 axgrid=ImageGrid(fig,111, 88 nrows_ncols=(nrows,ncols), 89 ngrids=plotnum, 90 direction=direction, 91 axes_pad=axes_pad, 92 add_all=add_all, 93 share_all=share_all, 94 label_mode=label_mode, 95 cbar_mode=cbar_mode, 96 cbar_location=cbar_location, 97 cbar_size=cbar_size, 98 cbar_pad=cbar_pad) 17 #First 
process options 18 options = plotoptions(*args) 99 19 100 if cbar_mode=='None': 101 for ax in axgrid.cbar_axes: 102 fig._axstack.remove(ax) 20 #get number of subplots 21 subplotwidth = ceil(sqrt(options.numberofplots)) 22 #Get figure number and number of plots 23 figurenumber = options.figurenumber 24 numberofplots = options.numberofplots 103 25 104 for i,ax in enumerate(axgrid.axes_all): 105 plot_manager(options.list[i].getfieldvalue('model',md),options.list[i],fig,axgrid,i) 106 fig.show() 107 else: 108 raise Exception('plotmodel error message: no output data found.') 26 #get hold 27 hold = options.list[0].getfieldvalue('hold', False) 28 29 #if nrows and ncols specified, then bypass 30 if options.list[0].exist('nrows'): 31 nrows = options.list[0].getfieldvalue('nrows') 32 nr = True 33 else: 34 nrows = np.ceil(numberofplots / subplotwidth) 35 nr = False 36 37 if options.list[0].exist('ncols'): 38 ncols = options.list[0].getfieldvalue('ncols') 39 nc = True 40 else: 41 ncols = int(subplotwidth) 42 nc = False 43 ncols = int(ncols) 44 nrows = int(nrows) 45 46 #check that nrows and ncols were given at the same time! 
47 if not nr == nc: 48 raise Exception('error: nrows and ncols need to be specified together, or not at all') 49 50 #Go through plots 51 if numberofplots: 52 #if plt.fignum_exists(figurenumber): 53 # plt.cla() 54 55 #if figsize specified 56 if options.list[0].exist('figsize'): 57 figsize = options.list[0].getfieldvalue('figsize') 58 fig = plt.figure(figurenumber, figsize=(figsize[0], figsize[1])) 59 else: 60 fig = plt.figure(figurenumber) 61 fig.clf() 62 63 backgroundcolor = options.list[0].getfieldvalue('backgroundcolor', (0.7, 0.7, 0.7)) 64 fig.set_facecolor(backgroundcolor) 65 66 translator = {'on': 'each', 67 'off': 'None', 68 'one': 'single'} 69 # options needed to define plot grid 70 plotnum = options.numberofplots 71 if plotnum == 1: 72 plotnum = None 73 direction = options.list[0].getfieldvalue('direction', 'row') # row, column 74 axes_pad = options.list[0].getfieldvalue('axes_pad', 0.25) 75 add_all = options.list[0].getfieldvalue('add_all', True) # True, False 76 share_all = options.list[0].getfieldvalue('share_all', True) # True, False 77 label_mode = options.list[0].getfieldvalue('label_mode', 'L') # 1, L, all 78 colorbar = options.list[0].getfieldvalue('colorbar', 'on') # on, off (single) 79 cbar_mode = translator[colorbar] 80 cbar_location = options.list[0].getfieldvalue('colorbarpos', 'right') # right, top 81 cbar_size = options.list[0].getfieldvalue('colorbarsize', '5%') 82 cbar_pad = options.list[0].getfieldvalue('colorbarpad', 0.025) # None or % 83 84 axgrid = ImageGrid(fig, 111, 85 nrows_ncols=(nrows, ncols), 86 #ngrids=plotnum, 87 direction=direction, 88 axes_pad=axes_pad, 89 add_all=add_all, 90 share_all=share_all, 91 label_mode=label_mode, 92 cbar_mode=cbar_mode, 93 cbar_location=cbar_location, 94 cbar_size=cbar_size, 95 cbar_pad=cbar_pad) 96 97 if cbar_mode == 'None': 98 for ax in axgrid.cbar_axes: 99 fig._axstack.remove(ax) 100 101 for i, ax in enumerate(axgrid.axes_all): 102 plot_manager(options.list[i].getfieldvalue('model', md), 
options.list[i], fig, axgrid, i) 103 fig.show() 104 else: 105 raise Exception('plotmodel error message: no output data found.')
Note:
See TracChangeset
for help on using the changeset viewer.