Index: ../trunk-jpl/src/wrappers/python/include/pythonincludes.h
===================================================================
--- ../trunk-jpl/src/wrappers/python/include/pythonincludes.h	(revision 19894)
+++ ../trunk-jpl/src/wrappers/python/include/pythonincludes.h	(revision 19895)
@@ -10,7 +10,7 @@
 
 #ifdef _HAVE_PYTHON_
 
-#if _PYTHON_MAJOR_ == 2
+#if _PYTHON_MAJOR_ >= 2
 #undef NPY_NO_DEPRECATED_API
 #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
 #else
Index: ../trunk-jpl/src/wrappers/python/io/FetchPythonData.cpp
===================================================================
--- ../trunk-jpl/src/wrappers/python/io/FetchPythonData.cpp	(revision 19894)
+++ ../trunk-jpl/src/wrappers/python/io/FetchPythonData.cpp	(revision 19895)
@@ -23,9 +23,7 @@
 	/*return internal value: */
 	if (PyFloat_Check(py_float))
 		dscalar=PyFloat_AsDouble(py_float);
-	else if (PyInt_Check(py_float))
-		dscalar=(double)PyInt_AsLong(py_float);
 	else if (PyLong_Check(py_float))
 		dscalar=PyLong_AsDouble(py_float);
 	else if (PyBool_Check(py_float))
 		dscalar=(double)PyLong_AsLong(py_float);
@@ -46,8 +46,6 @@
 	int iscalar;
 
 	/*return internal value: */
-	if (PyInt_Check(py_long))
-		iscalar=(int)PyInt_AsLong(py_long);
-	else if (PyLong_Check(py_long))
-		iscalar=(int)PyLong_AsLong(py_long);
+	if (PyLong_Check(py_long))
+		iscalar=(int)PyLong_AsLong(py_long);
 	else if (PyFloat_Check(py_long))
@@ -73,10 +73,8 @@
 	/*return internal value: */
 	if (PyBool_Check(py_boolean))
 		bscalar=(bool)PyLong_AsLong(py_boolean);
-	else if (PyInt_Check(py_boolean))
-		bscalar=(bool)PyInt_AsLong(py_boolean);
 	else if (PyLong_Check(py_boolean))
 		bscalar=(bool)PyLong_AsLong(py_boolean);
 	else if (PyTuple_Check(py_boolean) && (int)PyTuple_Size(py_boolean)==1)
 		FetchData(&bscalar,PyTuple_GetItem(py_boolean,(Py_ssize_t)0));
 	else if (PyList_Check(py_boolean) && (int)PyList_Size(py_boolean)==1)
@@ -723,7 +723,7 @@
 	/*Fetch all options*/
 	for (int i=istart; i(contourname);
 }
Index: ../trunk-jpl/src/wrappers/python/io/WritePythonData.cpp
===================================================================
--- ../trunk-jpl/src/wrappers/python/io/WritePythonData.cpp	(revision 19894)
+++ ../trunk-jpl/src/wrappers/python/io/WritePythonData.cpp	(revision 19895)
@@ -19,7 +19,7 @@
 
 /*FUNCTION WriteData(PyObject* py_tuple,int index,int integer){{{*/
 void WriteData(PyObject* py_tuple, int index, int integer){
-	PyTuple_SetItem(py_tuple, index, PyInt_FromSsize_t((Py_ssize_t)integer));
+	PyTuple_SetItem(py_tuple, index, PyLong_FromSsize_t((Py_ssize_t)integer));
 }/*}}}*/
 
 /*FUNCTION WriteData(PyObject* py_tuple,int index,char* string){{{*/
@@ -211,13 +211,13 @@
 
 	for (i=0; i<riftstruct->numrifts; i++) {
 		dict=PyDict_New();
-		PyDict_SetItemString(dict,"numsegs"          ,PyInt_FromSsize_t((Py_ssize_t)riftstruct->riftsnumsegments[i]));
+		PyDict_SetItemString(dict,"numsegs"          ,PyLong_FromSsize_t((Py_ssize_t)riftstruct->riftsnumsegments[i]));
 		PyDict_SetItemString(dict,"segments"         ,PyArrayFromCopiedData(riftstruct->riftsnumsegments[i]  ,3,riftstruct->riftssegments[i]));
 		PyDict_SetItemString(dict,"pairs"            ,PyArrayFromCopiedData(riftstruct->riftsnumpairs[i]     ,2,riftstruct->riftspairs[i]));
 		PyDict_SetItemString(dict,"tips"             ,PyArrayFromCopiedData(1                                ,2,&riftstruct->riftstips[2*i]));
 		PyDict_SetItemString(dict,"penaltypairs"     ,PyArrayFromCopiedData(riftstruct->riftsnumpenaltypairs[i],7,riftstruct->riftspenaltypairs[i]));
-		PyDict_SetItemString(dict,"fill"             
,PyInt_FromSsize_t((Py_ssize_t)IceEnum)); - PyDict_SetItemString(dict,"friction" ,PyInt_FromSsize_t((Py_ssize_t)0)); + PyDict_SetItemString(dict,"fill" ,PyLong_FromSsize_t((Py_ssize_t)IceEnum)); + PyDict_SetItemString(dict,"friction" ,PyLong_FromSsize_t((Py_ssize_t)0)); PyDict_SetItemString(dict,"fraction" ,PyFloat_FromDouble(0.)); PyDict_SetItemString(dict,"fractionincrement",PyFloat_FromDouble(0.1)); PyDict_SetItemString(dict,"state" ,PyArrayFromCopiedData(riftstruct->riftsnumpenaltypairs[i],1,riftstruct->state[i])); Index: ../trunk-jpl/src/wrappers/IssmConfig/IssmConfig.cpp =================================================================== --- ../trunk-jpl/src/wrappers/IssmConfig/IssmConfig.cpp (revision 19894) +++ ../trunk-jpl/src/wrappers/IssmConfig/IssmConfig.cpp (revision 19895) @@ -23,6 +23,7 @@ IssmDouble value = 0.; char *svalue = NULL; + /*Boot module: */ MODULEBOOT(); @@ -161,10 +162,12 @@ else WriteData(VALUE,value); + /*Clean up*/ xDelete(name); xDelete(svalue); /*end module: */ MODULEEND(); + } Index: ../trunk-jpl/src/wrappers/IssmConfig/IssmConfig.h =================================================================== --- ../trunk-jpl/src/wrappers/IssmConfig/IssmConfig.h (revision 19894) +++ ../trunk-jpl/src/wrappers/IssmConfig/IssmConfig.h (revision 19895) @@ -5,6 +5,7 @@ #ifndef _ISSMCONFIG_H #define _ISSMCONFIG_H + #ifdef HAVE_CONFIG_H #include #else Index: ../trunk-jpl/src/py3/os/issmscpin.py =================================================================== --- ../trunk-jpl/src/py3/os/issmscpin.py (revision 0) +++ ../trunk-jpl/src/py3/os/issmscpin.py (revision 19895) @@ -0,0 +1,68 @@ +from socket import gethostname +import subprocess +import os +import shutil +import MatlabFuncs as m + +def issmscpin(host, login,port,path, packages): + """ + ISSMSCPIN get packages from host, using scp on unix, and pscp on windows + + usage: issmscpin(host,packages,path) + """ + + #first get hostname + hostname=gethostname() + + #first be sure packages are not in the current directory, this could conflict with pscp on windows. + #remove warnings in case the files do not exist + for package in packages: + try: + os.remove(package) + except OSError as e: + pass + + #if hostname and host are the same, do a simple copy + if m.strcmpi(hostname,host): + + for package in packages: + try: + shutil.copy(os.path.join(path,package),os.getcwd()) #keep going, even if success=0 + except OSError as e: + pass + + else: + + if m.ispc(): + #use the putty project pscp.exe: it should be in the path. + + #get ISSM_DIR variable + if 'ISSM_DIR_WIN' in os.environ: + ISSM_DIR=os.environ['ISSM_DIR_WIN'][1:-2] + else: + raise OSError("issmscpin error message: could not find ISSM_DIR_WIN environment variable.") + + username=input('Username: (quoted string) ') + key=input('Key: (quoted string) ') + + for package in packages: + try: + subprocess.check_call('%s/externalpackages/ssh/pscp.exe -l "%s" -pw "%s" %s:%s %s' % (ISSM_DIR,username,key,host,os.path.join(path,package),os.getcwd()),shell=True) + except CalledProcessError as e: + raise CalledProcessError("issmscpin error message: could not call putty pscp.") + + else: + #just use standard unix scp + #string to copy multiple files using scp: + string='\{'+','.join([str(x) for x in packages])+'\}' + + if port: + subprocess.call('scp -P %d %s@localhost:%s %s/. ' % (port,login,os.path.join(path,string),os.getcwd()),shell=True) + else: + subprocess.call('scp %s@%s:%s %s/.' 
% (login,host,os.path.join(path,string),os.getcwd()),shell=True) + + #check scp worked + for package in packages: + if not os.path.exists(os.path.join('.',package)): + raise OSError("issmscpin error message: could not call scp on *nix system.") + Index: ../trunk-jpl/src/py3/os/issmscpout.py =================================================================== --- ../trunk-jpl/src/py3/os/issmscpout.py (revision 0) +++ ../trunk-jpl/src/py3/os/issmscpout.py (revision 19895) @@ -0,0 +1,59 @@ +from socket import gethostname +import subprocess +import os +import MatlabFuncs as m + +def issmscpout(host,path,login,port,packages): + """ + ISSMSCPOUT send packages to a host, using scp on unix, and pscp on windows + + usage: issmscpout(host,path,packages) + """ + + #get hostname + hostname=gethostname(); + + #if hostname and host are the same, do a simple copy + + if m.strcmpi(host,hostname): + for package in packages: + here=os.getcwd() + os.chdir(path) + try: + os.remove(package) + except OSError as e: + pass + subprocess.call('ln -s %s %s' % (os.path.join(here,package),path),shell=True) + os.chdir(here) + else: + if m.ispc(): + #use the putty project pscp.exe: it should be in the path. + + #get ISSM_DIR variable + if 'ISSM_DIR_WIN' in os.environ: + ISSM_DIR=os.environ['ISSM_DIR_WIN'][1:-2] + else: + raise OSError("issmscpout error message: could not find ISSM_DIR_WIN environment variable.") + + username=input('Username: (quoted string) ') + key=input('Key: (quoted string) ') + + for package in packages: + try: + subprocess.check_call('%s/externalpackages/ssh/pscp.exe -l "%s" -pw "%s" %s %s:%s' % (ISSM_DIR,username,key,package,host,path),shell=True) + except CalledProcessError as e: + raise CalledProcessError("issmscpout error message: could not call putty pscp.") + + else: + #just use standard unix scp + #create string of packages being sent + string='' + for package in packages: + string+=' '+package + string+=' ' + + if port: + subprocess.call('scp -P %d %s %s@localhost:%s' % (port,string,login,path),shell=True) + else: + subprocess.call('scp %s %s@%s:%s' % (string,login,host,path),shell=True) + Index: ../trunk-jpl/src/py3/os/issmssh.py =================================================================== --- ../trunk-jpl/src/py3/os/issmssh.py (revision 0) +++ ../trunk-jpl/src/py3/os/issmssh.py (revision 19895) @@ -0,0 +1,59 @@ +from socket import gethostname +import subprocess +import os +import MatlabFuncs as m + +def issmssh(host,login,port,command): + """ + ISSMSSH - wrapper for OS independent ssh command. + + usage: + issmssh(host,command) + """ + + #first get hostname + hostname=gethostname() + + #if same as host, just run the command. + if m.strcmpi(host,hostname): + subprocess.call(command,shell=True) + else: + if m.ispc(): + #use the putty project plink.exe: it should be in the path. 
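+			#(the command assembled below has the form, with illustrative
+			# username/key values:
+			#   plink.exe -ssh -l "jdoe" -pw "secret" host "command")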
+ + #get ISSM_DIR variable + if 'ISSM_DIR_WIN' in os.environ: + ISSM_DIR=os.environ['ISSM_DIR_WIN'][1:-2] + else: + raise OSError("issmssh error message: could not find ISSM_DIR_WIN environment variable.") + + username=input('Username: (quoted string) ') + key=input('Key: (quoted string) ') + + subprocess.call('%s/externalpackages/ssh/plink.exe -ssh -l "%s" -pw "%s" %s "%s"' % (ISSM_DIR,username,key,host,command),shell=True); + + else: + #just use standard unix ssh + if port: + subprocess.call('ssh -l %s -p %d localhost "%s"' % (login,port,command),shell=True) + else: + subprocess.call('ssh -l %s %s "%s"' % (login,host,command),shell=True) + + # The following code was added to fix: + # "IOError: [Errno 35] Resource temporarily unavailable" + # on the Mac when trying to display md after the solution. + # (from http://code.google.com/p/robotframework/issues/detail?id=995) + + # Make FreeBSD use blocking I/O like other platforms + import sys + import fcntl + from os import O_NONBLOCK + + fd = sys.stdin.fileno() + flags = fcntl.fcntl(fd, fcntl.F_GETFL) + fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~O_NONBLOCK) + + fd = sys.stdout.fileno() + flags = fcntl.fcntl(fd, fcntl.F_GETFL) + fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~O_NONBLOCK) + Index: ../trunk-jpl/src/py3/os/issmdir.py =================================================================== --- ../trunk-jpl/src/py3/os/issmdir.py (revision 0) +++ ../trunk-jpl/src/py3/os/issmdir.py (revision 19895) @@ -0,0 +1,23 @@ +import os +import MatlabFuncs as m + +def issmdir(): + """ + ISSMDIR - Get ISSM_DIR environment variable + + Usage: + ISSM_DIR=issmdir() + """ + + if not m.ispc(): + ISSM_DIR =os.environ['ISSM_DIR'] + else: + ISSM_DIR =os.environ['ISSM_DIR_WIN'] + if m.strcmpi(ISSM_DIR[-1],'/') or m.strcmpi(ISSM_DIR[-1],'\\'): + ISSM_DIR = ISSM_DIR[:-1] #shave off the last '/' + + if not ISSM_DIR: + raise RuntimeError("issmdir error message: 'ISSM_DIR' environment variable is empty! You should define ISSM_DIR in your .cshrc or .bashrc!") + + return ISSM_DIR + Index: ../trunk-jpl/src/py3/materials/paterson.py =================================================================== --- ../trunk-jpl/src/py3/materials/paterson.py (revision 0) +++ ../trunk-jpl/src/py3/materials/paterson.py (revision 19895) @@ -0,0 +1,79 @@ +import numpy + +def paterson(temperature): + """ + PATERSON - figure out the rigidity of ice for a given temperature + + rigidity (in s^(1/3)Pa) is the flow law paramter in the flow law sigma=B*e(1/3) (Paterson, p97). 
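+	(B is obtained from the flow law coefficient A, tabulated in s^-1 kPa^-3,
+	as B=A^(-1/n)*10^3 with n=3; see the commented MATLAB reference block below)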
+ temperature is in Kelvin degrees + + Usage: + rigidity=paterson(temperature) + """ + + if numpy.any(temperature<0.): + raise RuntimeError("input temperature should be in Kelvin (positive)") + + if numpy.ndim(temperature)==2: + #T = temperature.reshape(-1,)-273.15 + T = temperature.flatten()-273.15 + elif isinstance(temperature,float) or isinstance(temperature,int): + T = numpy.array([temperature])-273.15 + else: + T = temperature-273.15 + + #The routine below is equivalent to: + + # n=3; T=temperature-273; + # %From paterson, + # Temp=[0;-2;-5;-10;-15;-20;-25;-30;-35;-40;-45;-50]; + # A=[6.8*10^-15;2.4*10^-15;1.6*10^-15;4.9*10^-16;2.9*10^-16;1.7*10^-16;9.4* + # 10^-17;5.1*10^-17;2.7*10^-17;1.4*10^-17;7.3*10^-18;3.6*10^-18];;%s-1(kPa-3) + # %Convert into rigidity B + # B=A.^(-1/n)*10^3; %s^(1/3)Pa + # %Now, do a cubic fit between Temp and B: + # fittedmodel=fit(Temp,B,'cubicspline'); + # rigidity=fittedmodel(temperature); + + rigidity=numpy.zeros_like(T) + pos1=numpy.nonzero(T<=-45)[0] + if len(pos1): + rigidity[pos1]=10**8*(-0.000292866376675*(T[pos1]+50)**3+ 0.011672640664130*(T[pos1]+50)**2 -0.325004442485481*(T[pos1]+50)+ 6.524779401948101) + pos2=numpy.nonzero(numpy.logical_and(-45<=T,T<-40))[0] + if len(pos2): + rigidity[pos2]=10**8*(-0.000292866376675*(T[pos2]+45)**3+ 0.007279645014004*(T[pos2]+45)**2 -0.230243014094813*(T[pos2]+45)+ 5.154964909039554) + pos3=numpy.nonzero(numpy.logical_and(-40<=T,T<-35))[0] + if len(pos3): + rigidity[pos3]=10**8*(0.000072737147457*(T[pos3]+40)**3+ 0.002886649363879*(T[pos3]+40)**2 -0.179411542205399*(T[pos3]+40)+ 4.149132666831214) + pos4=numpy.nonzero(numpy.logical_and(-35<=T,T<-30))[0] + if len(pos4): + rigidity[pos4]=10**8*(-0.000086144770023*(T[pos4]+35)**3+ 0.003977706575736*(T[pos4]+35)**2 -0.145089762507325*(T[pos4]+35)+ 3.333333333333331) + pos5=numpy.nonzero(numpy.logical_and(-30<=T,T<-25))[0] + if len(pos5): + rigidity[pos5]=10**8*(-0.000043984685769*(T[pos5]+30)**3+ 0.002685535025386*(T[pos5]+30)**2 -0.111773554501713*(T[pos5]+30)+ 2.696559088937191) + pos6=numpy.nonzero(numpy.logical_and(-25<=T,T<-20))[0] + if len(pos6): + rigidity[pos6]=10**8*(-0.000029799523463*(T[pos6]+25)**3+ 0.002025764738854*(T[pos6]+25)**2 -0.088217055680511*(T[pos6]+25)+ 2.199331606342181) + pos7=numpy.nonzero(numpy.logical_and(-20<=T,T<-15))[0] + if len(pos7): + rigidity[pos7]=10**8*(0.000136920904777*(T[pos7]+20)**3+ 0.001578771886910*(T[pos7]+20)**2 -0.070194372551690*(T[pos7]+20)+ 1.805165505978111) + pos8=numpy.nonzero(numpy.logical_and(-15<=T,T<-10))[0] + if len(pos8): + rigidity[pos8]=10**8*(-0.000899763781026*(T[pos8]+15)**3+ 0.003632585458564*(T[pos8]+15)**2 -0.044137585824322*(T[pos8]+15)+ 1.510778053489523) + pos9=numpy.nonzero(numpy.logical_and(-10<=T,T<-5))[0] + if len(pos9): + rigidity[pos9]=10**8*(0.001676964325070*(T[pos9]+10)**3- 0.009863871256831*(T[pos9]+10)**2 -0.075294014815659*(T[pos9]+10)+ 1.268434288203714) + pos10=numpy.nonzero(numpy.logical_and(-5<=T,T<-2))[0] + if len(pos10): + rigidity[pos10]=10**8*(-0.003748937622487*(T[pos10]+5)**3+0.015290593619213*(T[pos10]+5)**2 -0.048160403003748*(T[pos10]+5)+ 0.854987973338348) + pos11=numpy.nonzero(-2<=T)[0] + if len(pos11): + rigidity[pos11]=10**8*(-0.003748937622488*(T[pos11]+2)**3-0.018449844983174*(T[pos11]+2)**2 -0.057638157095631*(T[pos11]+2)+ 0.746900791092860) + + #Now make sure that rigidity is positive + pos=numpy.nonzero(rigidity<0)[0] + if len(pos): + rigidity[pos]=1.e6 + + return rigidity + Index: ../trunk-jpl/src/py3/materials/DepthAvgTempCond.py 
=================================================================== --- ../trunk-jpl/src/py3/materials/DepthAvgTempCond.py (revision 0) +++ ../trunk-jpl/src/py3/materials/DepthAvgTempCond.py (revision 19895) @@ -0,0 +1,34 @@ +import numpy as npy +from TMeltingPoint import TMeltingPoint + +def DepthAvgTempCond(md): + ''' compute conduction dependent temperature profile for an ice sheet. + Usage: + Tbar=DepthAvgTempCond(md) + ''' + + Tpmp=TMeltingPoint(md.materials.meltingpoint,0) #pressure melting point at 0 pressure. + + k=md.materials.thermalconductivity + G=md.basalforcings.geothermalflux + H=md.geometry.thickness + Ts=md.initialization.temperature + alpha=G*H/k + + Tbar=npy.zeros(md.mesh.numberofvertices,) + + #find temperature average when we are below melting point: + pos=npy.nonzero( Ts+alpha < Tpmp) + if pos: + Tbar[pos]=Ts[pos]+alpha[pos]/2 + + pos=npy.nonzero( Ts+alpha>= Tpmp) + if pos: + Tbar[pos]=Tpmp+(Tpmp**2-Ts[pos]**2)/2/alpha[pos]+ Tpmp*(Ts[pos]-Tpmp)/alpha[pos] + + #on ice shelf, easier: + pos=npy.nonzero(md.mask.groundedice_levelset[0]<=0) + if pos: + Tbar[pos]=(Ts[pos]+Tpmp)/2 + + return Tbar Index: ../trunk-jpl/src/py3/materials/TMeltingPoint.py =================================================================== --- ../trunk-jpl/src/py3/materials/TMeltingPoint.py (revision 0) +++ ../trunk-jpl/src/py3/materials/TMeltingPoint.py (revision 19895) @@ -0,0 +1,21 @@ +import numpy as npy + +def TMeltingPoint(reftemp,pressure): + ''' + Calculate the pressure melting point of ice at a given pressure + + reftemp is the melting temperature in K at atmospheric pressure (initialized in md.materials.meltingpoint) + + pressure is in Pa + + Usage: + Tm=TMeltingPoint(md.materials.meltingpoint,pressure) + ''' + + #variables + beta=7.9e-8 + + #ensure ref is same dimension as pressure + ref=reftemp*npy.ones_like(pressure) + + return reftemp-beta*pressure Index: ../trunk-jpl/src/py3/materials/cuffey.py =================================================================== --- ../trunk-jpl/src/py3/materials/cuffey.py (revision 0) +++ ../trunk-jpl/src/py3/materials/cuffey.py (revision 19895) @@ -0,0 +1,49 @@ +import numpy + +def cuffey(temperature): + """ + CUFFEY - calculates ice rigidity as a function of temperature + + rigidity (in s^(1/3)Pa) is the flow law parameter in the flow law sigma=B*e(1/3) + (Cuffey and Paterson, p75). 
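+	(same piecewise cubic-spline evaluation as paterson.py, with coefficients
+	fitted to the Cuffey and Paterson table)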
+ temperature is in Kelvin degrees + + Usage: + rigidity=cuffey(temperature) + """ + + if numpy.any(temperature<0.): + raise RuntimeError("input temperature should be in Kelvin (positive)") + + T = temperature.reshape(-1,)-273.15 + + rigidity=numpy.zeros_like(T) + pos=numpy.nonzero(T<=-45) + rigidity[pos]=10**8*(-0.000396645116301*(T[pos]+50)**3+ 0.013345579471334*(T[pos]+50)**2 -0.356868703259105*(T[pos]+50)+7.272363035371383) + pos=numpy.nonzero(numpy.logical_and(-45<=T,T<-40)) + rigidity[pos]=10**8*(-0.000396645116301*(T[pos]+45)**3+ 0.007395902726819*(T[pos]+45)**2 -0.253161292268336*(T[pos]+45)+5.772078366321591) + pos=numpy.nonzero(numpy.logical_and(-40<=T,T<-35)) + rigidity[pos]=10**8*(0.000408322072669*(T[pos]+40)**3+ 0.001446225982305*(T[pos]+40)**2 -0.208950648722716*(T[pos]+40)+4.641588833612773) + pos=numpy.nonzero(numpy.logical_and(-35<=T,T<-30)) + rigidity[pos]=10**8*(-0.000423888728124*(T[pos]+35)**3+ 0.007571057072334*(T[pos]+35)**2 -0.163864233449525*(T[pos]+35)+3.684031498640382) + pos=numpy.nonzero(numpy.logical_and(-30<=T,T<-25)) + rigidity[pos]=10**8*(0.000147154327025*(T[pos]+30)**3+ 0.001212726150476*(T[pos]+30)**2 -0.119945317335478*(T[pos]+30)+3.001000667185614) + pos=numpy.nonzero(numpy.logical_and(-25<=T,T<-20)) + rigidity[pos]=10**8*(-0.000193435838672*(T[pos]+25)**3+ 0.003420041055847*(T[pos]+25)**2 -0.096781481303861*(T[pos]+25)+2.449986525148220) + pos=numpy.nonzero(numpy.logical_and(-20<=T,T<-15)) + rigidity[pos]=10**8*(0.000219771255067*(T[pos]+20)**3+ 0.000518503475772*(T[pos]+20)**2 -0.077088758645767*(T[pos]+20)+2.027400665191131) + pos=numpy.nonzero(numpy.logical_and(-15<=T,T<-10)) + rigidity[pos]=10**8*(-0.000653438900191*(T[pos]+15)**3+ 0.003815072301777*(T[pos]+15)**2 -0.055420879758021*(T[pos]+15)+1.682390865739973) + pos=numpy.nonzero(numpy.logical_and(-10<=T,T<-5)) + rigidity[pos]=10**8*(0.000692439419762*(T[pos]+10)**3 -0.005986511201093 *(T[pos]+10)**2 -0.066278074254598*(T[pos]+10)+1.418983411970382) + pos=numpy.nonzero(numpy.logical_and(-5<=T,T<-2)) + rigidity[pos]=10**8*(-0.000132282004110*(T[pos]+5)**3 +0.004400080095332*(T[pos]+5)**2 -0.074210229783403*(T[pos]+5)+ 1.024485188140279) + pos=numpy.nonzero(-2<=T) + rigidity[pos]=10**8*(-0.000132282004110*(T[pos]+2)**3 +0.003209542058346*(T[pos]+2)**2 -0.051381363322371*(T[pos]+2)+ 0.837883605537096) + + #Now make sure that rigidity is positive + pos=numpy.nonzero(rigidity<0) + rigidity[pos]=1**6 + + return rigidity + Index: ../trunk-jpl/src/py3/solve/solve.py =================================================================== --- ../trunk-jpl/src/py3/solve/solve.py (revision 0) +++ ../trunk-jpl/src/py3/solve/solve.py (revision 19895) @@ -0,0 +1,132 @@ +import datetime +import os +import shutil +from pairoptions import pairoptions +from EnumDefinitions import * +from EnumToString import EnumToString +from ismodelselfconsistent import ismodelselfconsistent +from marshall import marshall +from waitonlock import waitonlock +from loadresultsfromcluster import loadresultsfromcluster +import MatlabFuncs as m + +def solve(md,solutionenum,**kwargs): + """ + SOLVE - apply solution sequence for this model + + Usage: + md=solve(md,solutionenum,varargin) + where varargin is a list of paired arguments of string OR enums + + solution types available comprise: + - StressbalanceSolutionEnum + - MasstransportSolutionEnum + - ThermalSolutionEnum + - SteadystateSolutionEnum + - TransientSolutionEnum + - BalancethicknessSolutionEnum + - BedSlopeSolutionEnum + - SurfaceSlopeSolutionEnum + - HydrologySolutionEnum + - 
FlaimSolutionEnum + + extra options: + - loadonly : does not solve. only load results + - checkconsistency : 'yes' or 'no' (default is 'yes'), ensures checks on consistency of model + - restart: 'directory name (relative to the execution directory) where the restart file is located. + + Examples: + md=solve(md,StressbalanceSolutionEnum); + """ + + #recover and process solve options + if EnumToString(solutionenum)[0][-8:] != 'Solution': + raise ValueError("solutionenum '%s' not supported!" % EnumToString(solutionenum)[0]) + options=pairoptions(solutionenum=solutionenum,**kwargs) + + #recover some fields + md.private.solution=solutionenum + cluster=md.cluster + + #check model consistency + if m.strcmpi(options.getfieldvalue('checkconsistency','yes'),'yes'): + print("checking model consistency") + if solutionenum == FlaimSolutionEnum(): + md.private.isconsistent=True + md.mesh.checkconsistency(md,solutionenum) + md.flaim.checkconsistency(md,solutionenum) + if not md.private.isconsistent: + raise RuntimeError("Model not consistent, see messages above.") + else: + ismodelselfconsistent(md) + + #First, build a runtime name that is unique + restart=options.getfieldvalue('restart','') + if restart == 1: + pass #do nothing + else: + if restart: + md.private.runtimename=restart + else: + if options.getfieldvalue('runtimename',True): + c=datetime.datetime.now() + md.private.runtimename="%s-%02i-%02i-%04i-%02i-%02i-%02i-%i" % (md.miscellaneous.name,c.month,c.day,c.year,c.hour,c.minute,c.second,os.getpid()) + else: + md.private.runtimename=md.miscellaneous.name + + #if running qmu analysis, some preprocessing of dakota files using models + #fields needs to be carried out. + if md.qmu.isdakota: + md=preqmu(md,options) + + #flaim analysis + if solutionenum == FlaimSolutionEnum(): + md=flaim_sol(md,options) + [md.private.solution]=EnumToString(solutionenum) + return md + + #Do we load results only? 
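+	#(if yes, skip marshalling and job submission below and only fetch results)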
+	if options.getfieldvalue('loadonly',False):
+		md=loadresultsfromcluster(md)
+		return md
+
+	#Write all input files
+	marshall(md)    # bin file
+	md.toolkits.ToolkitsFile(md.miscellaneous.name+'.toolkits')    # toolkits file
+	cluster.BuildQueueScript(md.private.runtimename,md.miscellaneous.name,md.private.solution,md.settings.io_gather,md.debug.valgrind,md.debug.gprof,md.qmu.isdakota)    # queue file
+
+	#Stop here if batch mode
+	if m.strcmpi(options.getfieldvalue('batch','no'),'yes'):
+		print('batch mode requested: not launching job interactively')
+		print('launch solution sequence on remote cluster by hand')
+		return md
+
+	#Upload all required files:
+	modelname = md.miscellaneous.name
+	filelist  = [modelname+'.bin ',modelname+'.toolkits ',modelname+'.queue ']
+	if md.qmu.isdakota:
+		filelist.append(modelname+'.qmu.in')
+
+	if not restart:
+		cluster.UploadQueueJob(md.miscellaneous.name,md.private.runtimename,filelist)
+
+	#Launch job
+	cluster.LaunchQueueJob(md.miscellaneous.name,md.private.runtimename,filelist,restart)
+
+	#wait on lock
+	if md.settings.waitonlock>0:
+		#we wait for the done file
+		islock=waitonlock(md)
+		if islock==0:    #no results to be loaded
+			print('The results must be loaded manually with md=loadresultsfromcluster(md).')
+		else:    #load results
+			print('loading results from cluster')
+			md=loadresultsfromcluster(md)
+
+	#post processes qmu results if necessary
+	if md.qmu.isdakota:
+		if not m.strncmpi(options.getfieldvalue('keep','y'),'y',1):
+			shutil.rmtree('qmu'+str(os.getpid()))
+
+	return md
Index: ../trunk-jpl/src/py3/solve/waitonlock.py
===================================================================
--- ../trunk-jpl/src/py3/solve/waitonlock.py	(revision 0)
+++ ../trunk-jpl/src/py3/solve/waitonlock.py	(revision 19895)
@@ -0,0 +1,64 @@
+import os
+from socket import gethostname
+import time
+import MatlabFuncs as m
+
+def waitonlock(md):
+	"""
+	WAITONLOCK - wait for a file
+
+	This routine will return when a file named 'filename' is written to disk.
+	If the time limit given in input is exceeded, return 0
+
+	Usage:
+		flag=waitonlock(md)
+	"""
+
+	#Get filename (lock file) and options
+	executionpath=md.cluster.executionpath
+	cluster=md.cluster.name
+	login=md.cluster.login
+	port=md.cluster.port
+	timelimit=md.settings.waitonlock
+	filename=os.path.join(executionpath,md.private.runtimename,md.miscellaneous.name+'.lock')
+
+	#waitonlock will work if the lock is on the same machine only:
+	if not m.strcmpi(gethostname(),cluster):
+
+		print('solution launched on remote cluster. log in to detect job completion.')
+		choice=input('Is the job successfully completed? (y/n) ')
+		if not m.strcmp(choice,'y'):
+			print('Results not loaded... exiting')
+			flag=0
+		else:
+			flag=1
+
+	#job is running on the same machine
+	else:
+
+		if 'interactive' in vars(md.cluster) and md.cluster.interactive:
+			#We are in interactive mode, no need to check for job completion
+			flag=1
+			return flag
+		#initialize time and file presence test flag
+		etime=0
+		ispresent=0
+		print("waiting for '%s' hold on... (Ctrl+C to exit)" % filename)
+
+		#loop till file .lock exist or time is up
+		while ispresent==0 and etime<timelimit:
+			ispresent=os.path.exists(filename)
+			time.sleep(1.)
+			etime+=1./60.
+
+		if etime>timelimit:
+			print('Time limit exceeded. 
Increase md.settings.waitonlock') + print('The results must be loaded manually with md=loadresultsfromcluster(md).') + raise RuntimeError('waitonlock error message: time limit exceeded.') + flag=0 + else: + flag=1 + + return flag + Index: ../trunk-jpl/src/py3/solve/parseresultsfromdisk.py =================================================================== --- ../trunk-jpl/src/py3/solve/parseresultsfromdisk.py (revision 0) +++ ../trunk-jpl/src/py3/solve/parseresultsfromdisk.py (revision 19895) @@ -0,0 +1,275 @@ +import struct +import numpy +from collections import OrderedDict +import results as resultsclass +import MatlabFuncs as m + +def parseresultsfromdisk(filename,iosplit): + """ + PARSERESULTSFROMDISK - ... + + Usage: + results=parseresultsfromdisk(filename,iosplit) + """ + + if iosplit: + results=parseresultsfromdiskiosplit(filename) + else: + results=parseresultsfromdiskioserial(filename) + + return results + +def parseresultsfromdiskioserial(filename): # {{{ + """ + PARSERESULTSFROMDISK - ... + + Usage: + results=parseresultsfromdiskioserial(filename) + """ + + #Open file + try: + fid=open(filename,'rb') + except IOError as e: + raise IOError("loadresultsfromdisk error message: could not open '%s' for binary reading." % filename) + + #initialize results: + results=[] + results.append(None) + + #Read fields until the end of the file. + result=ReadData(fid) + + counter=0 + check_nomoresteps=0 + step=result['step'] + + while result: + + if check_nomoresteps: + #check that the new result does not add a step, which would be an error: + if result['step']>=1: + raise TypeError("parsing results for a steady-state core, which incorporates transient results!") + + #Check step, increase counter if this is a new step + if(step!=result['step'] and result['step']>1): + counter = counter + 1 + step = result['step'] + + #Add result + if result['step']==0: + #if we have a step = 0, this is a steady state solution, don't expect more steps. + index = 0; + check_nomoresteps=1 + + elif result['step']==1: + index = 0 + else: + index = counter; + + if index > len(results)-1: + for i in range(len(results)-1,index-1): + results.append(None) + results.append(resultsclass.results()) + + elif results[index] is None: + results[index]=resultsclass.results() + + + #Get time and step + if result['step'] != -9999.: + setattr(results[index],'step',result['step']) + if result['time'] != -9999.: + setattr(results[index],'time',result['time']) + + #Add result + if hasattr(results[index],result['fieldname']) and not m.strcmp(result['fieldname'],'SolutionType'): + setattr(results[index],result['fieldname'],numpy.vstack((getattr(results[index],result['fieldname']),result['field']))) + else: + setattr(results[index],result['fieldname'],result['field']) + + #read next result + result=ReadData(fid) + + fid.close() + + return results + # }}} +def parseresultsfromdiskiosplit(filename): # {{{ + """ + PARSERESULTSFROMDISKIOSPLIT - ... + + Usage: + results=parseresultsfromdiskiosplit(filename) + """ + + #Open file + try: + fid=open(filename,'rb') + except IOError as e: + raise IOError("loadresultsfromdisk error message: could not open '%s' for binary reading." 
% filename) + + results=[] + + #if we have done split I/O, ie, we have results that are fragmented across patches, + #do a first pass, and figure out the structure of results + result=ReadDataDimensions(fid) + while result: + + #Get time and step + if result['step'] > len(results): + for i in range(len(results),result['step']-1): + results.append(None) + results.append(resultsclass.results()) + setattr(results[result['step']-1],'step',result['step']) + setattr(results[result['step']-1],'time',result['time']) + + #Add result + setattr(results[result['step']-1],result['fieldname'],float('NaN')) + + #read next result + result=ReadDataDimensions(fid) + + #do a second pass, and figure out the size of the patches + fid.seek(0) #rewind + result=ReadDataDimensions(fid) + while result: + + #read next result + result=ReadDataDimensions(fid) + + #third pass, this time to read the real information + fid.seek(0) #rewind + result=ReadData(fid) + while result: + + #Get time and step + if result['step']> len(results): + for i in range(len(results),result['step']-1): + results.append(None) + results.append(resultsclass.results()) + setattr(results[result['step']-1],'step',result['step']) + setattr(results[result['step']-1],'time',result['time']) + + #Add result + setattr(results[result['step']-1],result['fieldname'],result['field']) + + #read next result + result=ReadData(fid) + + #close file + fid.close() + + return results + # }}} +def ReadData(fid): # {{{ + """ + READDATA - ... + + Usage: + field=ReadData(fid) + """ + + #read field + try: + length=struct.unpack('i',fid.read(struct.calcsize('i')))[0] + + fieldname=struct.unpack('%ds' % length,fid.read(length))[0][:-1] + time=struct.unpack('d',fid.read(struct.calcsize('d')))[0] + step=struct.unpack('i',fid.read(struct.calcsize('i')))[0] + + type=struct.unpack('i',fid.read(struct.calcsize('i')))[0] + M=struct.unpack('i',fid.read(struct.calcsize('i')))[0] + if type==1: + field=numpy.array(struct.unpack('%dd' % M,fid.read(M*struct.calcsize('d'))),dtype=float) + elif type==2: + field=struct.unpack('%ds' % M,fid.read(M))[0][:-1] + elif type==3: + N=struct.unpack('i',fid.read(struct.calcsize('i')))[0] +# field=transpose(fread(fid,[N M],'double')); + field=numpy.zeros(shape=(M,N),dtype=float) + for i in range(M): + field[i,:]=struct.unpack('%dd' % N,fid.read(N*struct.calcsize('d'))) + else: + raise TypeError("cannot read data of type %d" % type) + + #Process units here FIXME: this should not be done here! 
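+		#(velocity-type fields are converted from model units, m/s, to m/yr by
+		# multiplying with yts, the number of seconds per year: e.g. 1.e-6 m/s
+		# becomes 1.e-6*31536000 ~= 31.5 m/yr; Time is divided by yts instead)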
+ yts=365.0*24.0*3600.0 + if m.strcmp(fieldname,'BalancethicknessThickeningRate'): + field = field*yts + elif m.strcmp(fieldname,'Time'): + field = field/yts + elif m.strcmp(fieldname,'HydrologyWaterVx'): + field = field*yts + elif m.strcmp(fieldname,'HydrologyWaterVy'): + field = field*yts + elif m.strcmp(fieldname,'Vx'): + field = field*yts + elif m.strcmp(fieldname,'Vy'): + field = field*yts + elif m.strcmp(fieldname,'Vz'): + field = field*yts + elif m.strcmp(fieldname,'Vel'): + field = field*yts + elif m.strcmp(fieldname,'BasalforcingsGroundediceMeltingRate'): + field = field*yts + elif m.strcmp(fieldname,'TotalSmb'): + field = field/10.**12.*yts #(GigaTon/year) + elif m.strcmp(fieldname,'SmbMassBalance'): + field = field*yts + elif m.strcmp(fieldname,'CalvingCalvingrate'): + field = field*yts + + + result=OrderedDict() + result['fieldname']=fieldname + result['time']=time + result['step']=step + result['field']=field + + except struct.error as e: + result=None + + return result + # }}} +def ReadDataDimensions(fid): # {{{ + """ + READDATADIMENSIONS - read data dimensions, step and time, but not the data itself. + + Usage: + field=ReadDataDimensions(fid) + """ + + #read field + try: + length=struct.unpack('i',fid.read(struct.calcsize('i')))[0] + + fieldname=struct.unpack('%ds' % length,fid.read(length))[0][:-1] + time=struct.unpack('d',fid.read(struct.calcsize('d')))[0] + step=struct.unpack('i',fid.read(struct.calcsize('i')))[0] + + type=struct.unpack('i',fid.read(struct.calcsize('i')))[0] + M=struct.unpack('i',fid.read(struct.calcsize('i')))[0] + N=1 #default + if type==1: + fid.seek(M*8,1) + elif type==2: + fid.seek(M,1) + elif type==3: + N=struct.unpack('i',fid.read(struct.calcsize('i')))[0] + fid.seek(N*M*8,1) + else: + raise TypeError("cannot read data of type %d" % type) + + result=OrderedDict() + result['fieldname']=fieldname + result['time']=time + result['step']=step + result['M']=M + result['N']=N + + except struct.error as e: + result=None + + return result + # }}} Index: ../trunk-jpl/src/py3/solve/loadresultsfromdisk.py =================================================================== --- ../trunk-jpl/src/py3/solve/loadresultsfromdisk.py (revision 0) +++ ../trunk-jpl/src/py3/solve/loadresultsfromdisk.py (revision 19895) @@ -0,0 +1,67 @@ +import os +from results import results +from parseresultsfromdisk import parseresultsfromdisk +from EnumToString import EnumToString +import MatlabFuncs as m + +def loadresultsfromdisk(md,filename): + """ + LOADRESULTSFROMDISK - load results of solution sequence from disk file "filename" + + Usage: + md=loadresultsfromdisk(md=False,filename=False); + """ + + #check number of inputs/outputs + if not md or not filename: + raise ValueError("loadresultsfromdisk: error message.") + + if not md.qmu.isdakota: + + #Check that file exists + if not os.path.exists(filename): + raise OSError("binary file '%s' not found." % filename) + + #initialize md.results if not a structure yet + if not isinstance(md.results,results): + md.results=results() + + #load results onto model + structure=parseresultsfromdisk(filename,not md.settings.io_gather) + if not len(structure): + raise RuntimeError("No result found in binary file '%s'. Check for solution crash." 
% filename) + setattr(md.results,structure[0].SolutionType,structure) + + #recover solution_type from results + md.private.solution=structure[0].SolutionType + + #read log files onto fields + if os.path.exists(md.miscellaneous.name+'.errlog'): + with open(md.miscellaneous.name+'.errlog','r') as f: + setattr(getattr(md.results,structure[0].SolutionType)[0],'errlog',[line[:-1] for line in f]) + else: + setattr(getattr(md.results,structure[0].SolutionType)[0],'errlog',[]) + + if os.path.exists(md.miscellaneous.name+'.outlog'): + with open(md.miscellaneous.name+'.outlog','r') as f: + setattr(getattr(md.results,structure[0].SolutionType)[0],'outlog',[line[:-1] for line in f]) + else: + setattr(getattr(md.results,structure[0].SolutionType)[0],'outlog',[]) + + if len(getattr(md.results,structure[0].SolutionType)[0].errlog): + print ("loadresultsfromcluster info message: error during solution. Check your errlog and outlog model fields.") + + #if only one solution, extract it from list for user friendliness + if len(structure) == 1 and not m.strcmp(structure[0].SolutionType,'TransientSolution'): + setattr(md.results,structure[0].SolutionType,structure[0]) + + #post processes qmu results if necessary + else: + + if not isinstance(md.private.solution,str): + [md.private.solution]=EnumToString(md.private.solution) + md=postqmu(md) + os.chdir('..') + + return md + Index: ../trunk-jpl/src/py3/solve/marshall.py =================================================================== --- ../trunk-jpl/src/py3/solve/marshall.py (revision 0) +++ ../trunk-jpl/src/py3/solve/marshall.py (revision 19895) @@ -0,0 +1,52 @@ +from WriteData import WriteData +from EnumDefinitions import * + +def marshall(md): + """ + MARSHALL - outputs a compatible binary file from @model md, for certain solution type. + + The routine creates a compatible binary file from @model md + This binary file will be used for parallel runs in JPL-package + + Usage: + marshall(md) + """ + + print("marshalling file '%s.bin'." % md.miscellaneous.name) + + #open file for binary writing + try: + fid=open(md.miscellaneous.name+'.bin','wb') + except IOError as e: + raise IOError("marshall error message: could not open '%s.bin' file for binary writing." % md.miscellaneous.name) + + #First, write MaximumNumberOfEnum to make sure that the Enums are synchronized + WriteData(fid,'enum',MaximumNumberOfDefinitionsEnum(),'data',True,'format','Boolean') + + #Go through all model fields: check that it is a class and call checkconsistency + fields=vars(md) + +# for field in fields.iterkeys(): + for field in md.properties(): + + #Some properties do not need to be marshalled + if field in ['results','radaroverlay','toolkits','cluster','flaim','private']: + continue + + #Check that current field is an object + if not hasattr(getattr(md,field),'marshall'): + raise TypeError("field '%s' is not an object." % field) + + #Marshall current object + #print "marshalling %s ..." % field + exec("md.%s.marshall(md,fid)" % field) + + #Last, write MaximumNumberOfEnum+1 to make sure that the binary file is not corrupt + WriteData(fid,'enum',MaximumNumberOfDefinitionsEnum()+1,'data',True,'format','Boolean'); + + #close file + try: + fid.close() + except IOError as e: + raise IOError("marshall error message: could not close file '%s.bin'." 
% md.miscellaneous.name) + Index: ../trunk-jpl/src/py3/solve/WriteData.py =================================================================== --- ../trunk-jpl/src/py3/solve/WriteData.py (revision 0) +++ ../trunk-jpl/src/py3/solve/WriteData.py (revision 19895) @@ -0,0 +1,344 @@ +import numpy +import math +import struct +import pairoptions +import MatlabFuncs as m +from EnumDefinitions import * +from EnumToString import EnumToString + +def WriteData(fid,**kwargs): + """ + WRITEDATA - write model field in binary file + + Usage: + WriteData(fid,varargin) + """ + + #process options + options=pairoptions.pairoptions(**kwargs) + + #Get data properties + if options.exist('object'): + #This is an object field, construct enum and data + obj = options.getfieldvalue('object') + fieldname = options.getfieldvalue('fieldname') + classname = options.getfieldvalue('class',str(type(obj)).rsplit('.')[-1].split("'")[0]) + if options.exist('enum'): + enum = options.getfieldvalue('enum') + else: + enum = BuildEnum(classname+'_'+fieldname) + data = getattr(obj,fieldname) + else: + #No processing required + data = options.getfieldvalue('data') + enum = options.getfieldvalue('enum') + format = options.getfieldvalue('format') + mattype = options.getfieldvalue('mattype',0) #only required for matrices + timeserieslength = options.getfieldvalue('timeserieslength',-1) + + #Process sparse matrices +# if issparse(data), +# data=full(data); +# end + + #Scale data if necesarry + if options.exist('scale'): + scale = options.getfieldvalue('scale') + if numpy.size(data) > 1 : + if numpy.size(data,0)==timeserieslength: + data=numpy.array(data) + data[0:-1,:] = scale*data[0:-1,:] + else: + data = scale*data + else: + data = scale*data + if numpy.size(data) > 1 : + if numpy.size(data,0)==timeserieslength: + yts=365.0*24.0*3600.0 + data[-1,:] = yts*data[-1,:] + + #Step 1: write the enum to identify this record uniquely + fid.write(struct.pack('i',enum)) + + #Step 2: write the data itself. + if m.strcmpi(format,'Boolean'): # {{{ +# if len(data) !=1: +# raise ValueError('field %s cannot be marshalled as it has more than one element!' % EnumToString(enum)[0]) + + #first write length of record + fid.write(struct.pack('i',4+4)) #1 bool (disguised as an int)+code + + #write data code: + fid.write(struct.pack('i',FormatToCode(format))) + + #now write integer + fid.write(struct.pack('i',int(data))) #send an int, not easy to send a bool + # }}} + + elif m.strcmpi(format,'Integer'): # {{{ +# if len(data) !=1: +# raise ValueError('field %s cannot be marshalled as it has more than one element!' % EnumToString(enum)[0]) + + #first write length of record + fid.write(struct.pack('i',4+4)) #1 integer + code + + #write data code: + fid.write(struct.pack('i',FormatToCode(format))) + + #now write integer + fid.write(struct.pack('i',data)) + # }}} + + elif m.strcmpi(format,'Double'): # {{{ +# if len(data) !=1: +# raise ValueError('field %s cannot be marshalled as it has more than one element!' 
% EnumToString(enum)[0]) + + #first write length of record + fid.write(struct.pack('i',8+4)) #1 double+code + + #write data code: + fid.write(struct.pack('i',FormatToCode(format))) + + #now write double + fid.write(struct.pack('d',data)) + # }}} + + elif m.strcmpi(format,'String'): # {{{ + #first write length of record + fid.write(struct.pack('i',len(data)+4+4)) #string + string size + code + + #write data code: + fid.write(struct.pack('i',FormatToCode(format))) + + #now write string + fid.write(struct.pack('i',len(data))) + fid.write(struct.pack('%ds' % len(data),data)) + # }}} + + elif m.strcmpi(format,'BooleanMat'): # {{{ + + if isinstance(data,bool): + data=numpy.array([data]) + elif isinstance(data,(list,tuple)): + data=numpy.array(data).reshape(-1,1) + if numpy.ndim(data) == 1: + if numpy.size(data): + data=data.reshape(numpy.size(data),1) + else: + data=data.reshape(0,0) + + #Get size + s=data.shape + #if matrix = NaN, then do not write anything + if s[0]==1 and s[1]==1 and math.isnan(data[0][0]): + s=(0,0) + + #first write length of record + fid.write(struct.pack('i',4+4+8*s[0]*s[1]+4+4)) #2 integers (32 bits) + the double matrix + code + matrix type + + #write data code and matrix type: + fid.write(struct.pack('i',FormatToCode(format))) + fid.write(struct.pack('i',mattype)) + + #now write matrix + fid.write(struct.pack('i',s[0])) + fid.write(struct.pack('i',s[1])) + for i in range(s[0]): + for j in range(s[1]): + fid.write(struct.pack('d',float(data[i][j]))) #get to the "c" convention, hence the transpose + # }}} + + elif m.strcmpi(format,'IntMat'): # {{{ + + if isinstance(data,int): + data=numpy.array([data]) + elif isinstance(data,(list,tuple)): + data=numpy.array(data).reshape(-1,1) + if numpy.ndim(data) == 1: + if numpy.size(data): + data=data.reshape(numpy.size(data),1) + else: + data=data.reshape(0,0) + + #Get size + s=data.shape + #if matrix = NaN, then do not write anything + if s[0]==1 and s[1]==1 and math.isnan(data[0][0]): + s=(0,0) + + #first write length of record + fid.write(struct.pack('i',4+4+8*s[0]*s[1]+4+4)) #2 integers (32 bits) + the double matrix + code + matrix type + + #write data code and matrix type: + fid.write(struct.pack('i',FormatToCode(format))) + fid.write(struct.pack('i',mattype)) + + #now write matrix + fid.write(struct.pack('i',s[0])) + fid.write(struct.pack('i',s[1])) + for i in range(s[0]): + for j in range(s[1]): + fid.write(struct.pack('d',float(data[i][j]))) #get to the "c" convention, hence the transpose + # }}} + + elif m.strcmpi(format,'DoubleMat'): # {{{ + + if isinstance(data,(bool,int,float)): + data=numpy.array([data]) + elif isinstance(data,(list,tuple)): + data=numpy.array(data).reshape(-1,1) + if numpy.ndim(data) == 1: + if numpy.size(data): + data=data.reshape(numpy.size(data),1) + else: + data=data.reshape(0,0) + + #Get size + s=data.shape + #if matrix = NaN, then do not write anything + if s[0]==1 and s[1]==1 and math.isnan(data[0][0]): + s=(0,0) + + #first write length of record + recordlength=4+4+8*s[0]*s[1]+4+4; #2 integers (32 bits) + the double matrix + code + matrix type + if recordlength > 2**31 : + raise ValueError('field %s cannot be marshalled because it is larger than 4^31 bytes!' 
% EnumToString(enum)[0]) + + fid.write(struct.pack('i',recordlength)) #2 integers (32 bits) + the double matrix + code + matrix type + + #write data code and matrix type: + fid.write(struct.pack('i',FormatToCode(format))) + fid.write(struct.pack('i',mattype)) + + #now write matrix + fid.write(struct.pack('i',s[0])) + fid.write(struct.pack('i',s[1])) + for i in range(s[0]): + for j in range(s[1]): + fid.write(struct.pack('d',float(data[i][j]))) #get to the "c" convention, hence the transpose + # }}} + + elif m.strcmpi(format,'MatArray'): # {{{ + + #first get length of record + recordlength=4+4 #number of records + code + for matrix in data: + if isinstance(matrix,(bool,int,float)): + matrix=numpy.array([matrix]) + elif isinstance(matrix,(list,tuple)): + matrix=numpy.array(matrix).reshape(-1,1) + if numpy.ndim(matrix) == 1: + if numpy.size(matrix): + matrix=matrix.reshape(numpy.size(matrix),1) + else: + matrix=matrix.reshape(0,0) + + s=matrix.shape + recordlength+=4*2+s[0]*s[1]*8 #row and col of matrix + matrix of doubles + + #write length of record + fid.write(struct.pack('i',recordlength)) + + #write data code: + fid.write(struct.pack('i',FormatToCode(format))) + + #write data, first number of records + fid.write(struct.pack('i',len(data))) + + #write each matrix: + for matrix in data: + if isinstance(matrix,(bool,int,float)): + matrix=numpy.array([matrix]) + elif isinstance(matrix,(list,tuple)): + matrix=numpy.array(matrix).reshape(-1,1) + if numpy.ndim(matrix) == 1: + matrix=matrix.reshape(numpy.size(matrix),1) + + s=matrix.shape + fid.write(struct.pack('i',s[0])) + fid.write(struct.pack('i',s[1])) + for i in range(s[0]): + for j in range(s[1]): + fid.write(struct.pack('d',float(matrix[i][j]))) + # }}} + + elif m.strcmpi(format,'StringArray'): # {{{ + + #first get length of record + recordlength=4+4 #for length of array + code + for string in data: + recordlength+=4+len(string) #for each string + + #write length of record + fid.write(struct.pack('i',recordlength)) + + #write data code: + fid.write(struct.pack('i',FormatToCode(format))) + + #now write length of string array + fid.write(struct.pack('i',len(data))) + + #now write the strings + for string in data: + fid.write(struct.pack('i',len(string))) + fid.write(struct.pack('%ds' % len(string),string)) + # }}} + + else: # {{{ + raise TypeError('WriteData error message: data type: %d not supported yet! (%s)' % (format,EnumToString(enum)[0])) + # }}} + +def BuildEnum(string): # {{{ + """ + BUILDENUM - build enum out of string + + Usage: + enum=BuildEnum(string) + """ + + if '_' in string: + substrs=string.split('_') + string='' + for substr in substrs: + string+=substr[0].upper()+substr[1:] + else: + #take first letter of string and make it uppercase: + string=string[0].upper()+string[1:] + + #Get Enum + enum=StringToEnum(string)[0] + + return enum +# }}} + +def FormatToCode(format): # {{{ + """ + This routine takes the format string, and hardcodes it into an integer, which + is passed along the record, in order to identify the nature of the dataset being + sent. 
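+	(e.g. 'Boolean' -> 1, 'Double' -> 3, 'DoubleMat' -> 7; the C++ reader on
+	the receiving end is assumed to decode these same codes)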
+ """ + + if m.strcmpi(format,'Boolean'): + code=1 + elif m.strcmpi(format,'Integer'): + code=2 + elif m.strcmpi(format,'Double'): + code=3 + elif m.strcmpi(format,'String'): + code=4 + elif m.strcmpi(format,'BooleanMat'): + code=5 + elif m.strcmpi(format,'IntMat'): + code=6 + elif m.strcmpi(format,'DoubleMat'): + code=7 + elif m.strcmpi(format,'MatArray'): + code=8 + elif m.strcmpi(format,'StringArray'): + code=9 + else: + raise InputError('FormatToCode error message: data type not supported yet!') + + return code +# }}} + Index: ../trunk-jpl/src/py3/solve/loadresultsfromcluster.py =================================================================== --- ../trunk-jpl/src/py3/solve/loadresultsfromcluster.py (revision 0) +++ ../trunk-jpl/src/py3/solve/loadresultsfromcluster.py (revision 19895) @@ -0,0 +1,69 @@ +import os +import socket +import platform +from loadresultsfromdisk import loadresultsfromdisk + +def loadresultsfromcluster(md,runtimename=False): + """ + LOADRESULTSFROMCLUSTER - load results of solution sequence from cluster + + Usage: + md=loadresultsfromcluster(md,runtimename); + """ + + #retrieve cluster, to be able to call its methods + cluster=md.cluster + + if runtimename: + md.private.runtimename=runtimename + + #Download outputs from the cluster + filelist=[md.miscellaneous.name+'.outlog',md.miscellaneous.name+'.errlog'] + if md.qmu.isdakota: + filelist.append(md.miscellaneous.name+'.qmu.err') + filelist.append(md.miscellaneous.name+'.qmu.out') + if 'tabular_graphics_data' in md.qmu.params: + if md.qmu.params['tabular_graphics_data']: + filelist.append('dakota_tabular.dat') + else: + filelist.append(md.miscellaneous.name+'.outbin') + cluster.Download(md.private.runtimename,filelist) + + #If we are here, no errors in the solution sequence, call loadresultsfromdisk. + md=loadresultsfromdisk(md,md.miscellaneous.name+'.outbin') + + #erase the log and output files + if md.qmu.isdakota: + filename=os.path.join('qmu'+str(os.getpid()),md.miscellaneous.name) + else: + filename=md.miscellaneous.name + TryRem('.outbin',filename) + if not platform.system()=='Windows': + TryRem('.tar.gz',md.private.runtimename) + + TryRem('.errlog',filename) + TryRem('.outlog',filename) + + #erase input file if run was carried out on same platform. + hostname=socket.gethostname() + if hostname==cluster.name: + if md.qmu.isdakota: + filename=os.path.join('qmu'+str(os.getpid()),md.miscellaneous.name) + TryRem('.queue',filename) + else: + filename=md.miscellaneous.name + TryRem('.toolkits',filename) + if not platform.system()=='Windows': + TryRem('.queue',filename) + else: + TryRem('.bat',filename) + + TryRem('.bin',filename) + + return md + +def TryRem(extension,filename): + try: + os.remove(filename+extension) + except OSError: + print('WARNING, no '+extension+' is present for run '+filename) Index: ../trunk-jpl/src/py3/shp/shp2exp.py =================================================================== --- ../trunk-jpl/src/py3/shp/shp2exp.py (revision 0) +++ ../trunk-jpl/src/py3/shp/shp2exp.py (revision 19895) @@ -0,0 +1,50 @@ +import shapefile +import os +from expwrite import expwrite + +def shp2exp(shapefilename,*expfilename): + ''' + Convert a shapefile to an .exp file. Optionally, expfilename can be + specified to give a name for the .exp file to be created, otherwise the + .exp file will have the same prefix as the .shp file. 
+ + Usage: + shp2exp(shapefilename) + shp2exp(shapefilename,expfilename) + + Examples: + shp2exp('Domain.shp') % creates Domain.exp + shp2exp('Domain.shp','DomainForISSM.exp') + ''' + + if not os.path.exists(shapefilename): + raise IOError("shp2exp error message: file '%s' not found!" % parametername) + if not len(expfilename): + expfile=os.path.splitext(shapefilename)[0]+'.exp' + else: + expfile=expfilename[0] + + shp=shapefile.Reader(shapefilename) + expdict=dict(closed=1,density=1) + + x=[] + y=[] + for i in range(len(shp.shapes())): + geom=shp.shapes()[i].shapeType + if geom==5: # polygon + tmpx=[p[0] for p in shp.shapes()[i].points] + tmpy=[q[1] for q in shp.shapes()[i].points] + x.append(tmpx) + y.append(tmpy) + elif geom==3: # line + tmpx=[p[0] for p in shp.shapes()[i].points] + tmpy=[q[1] for q in shp.shapes()[i].points] + x.append(tmpx) + y.append(tmpy) + elif geom==1: # point + x.append(shp.shapes()[i].points[0][0]) + y.append(shp.shapes()[i].points[0][1]) + + expdict['x']=x + expdict['y']=y + expwrite(expdict,expfile) Index: ../trunk-jpl/src/py3/geometry/GetAreas.py =================================================================== --- ../trunk-jpl/src/py3/geometry/GetAreas.py (revision 0) +++ ../trunk-jpl/src/py3/geometry/GetAreas.py (revision 19895) @@ -0,0 +1,52 @@ +import numpy + +def GetAreas(index,x,y,z=numpy.array([])): + """ + GETAREAS - compute areas or volumes of elements + + compute areas of triangular elements or volumes + of pentahedrons + + Usage: + areas =GetAreas(index,x,y); + volumes=GetAreas(index,x,y,z); + + Examples: + areas =GetAreas(md.mesh.elements,md.mesh.x,md.mesh.y); + volumes=GetAreas(md.mesh.elements,md.mesh.x,md.mesh.y,md.z); + """ + + #get number of elements and number of nodes + nels=numpy.size(index,axis=0) + nods=numpy.size(x) + + #some checks + if numpy.size(y)!=nods or (z and numpy.size(z)!=nods): + raise TypeError("GetAreas error message: x,y and z do not have the same length.") + if numpy.max(index)>nods: + raise TypeError("GetAreas error message: index should not have values above %d." 
% nods) + if (not z and numpy.size(index,axis=1)!=3): + raise TypeError("GetAreas error message: index should have 3 columns for 2d meshes.") + if (z and numpy.size(index,axis=1)!=6): + raise TypeError("GetAreas error message: index should have 6 columns for 3d meshes.") + + #initialization + areas=numpy.zeros(nels) + x1=x[index[:,0]-1] + x2=x[index[:,1]-1] + x3=x[index[:,2]-1] + y1=y[index[:,0]-1] + y2=y[index[:,1]-1] + y3=y[index[:,2]-1] + + #compute the volume of each element + if not z: + #compute the surface of the triangle + areas=(0.5*((x2-x1)*(y3-y1)-(y2-y1)*(x3-x1))) + else: + #V=area(triangle)*1/3(z1+z2+z3) + thickness=numpy.mean(z[index[:,3:6]-1])-numpy.mean(z[index[:,0:3]-1]) + areas=(0.5*((x2-x1)*(y3-y1)-(y2-y1)*(x3-x1)))*thickness + + return areas + Index: ../trunk-jpl/src/py3/geometry/SegIntersect.py =================================================================== --- ../trunk-jpl/src/py3/geometry/SegIntersect.py (revision 0) +++ ../trunk-jpl/src/py3/geometry/SegIntersect.py (revision 19895) @@ -0,0 +1,82 @@ +import numpy + +def SegIntersect(seg1,seg2): + """ + SEGINTERSECT - test of segments intersection + + return 1 if the two segments intersect + seg1=[x1 y1; x2 y2] + seg2=[x1 y1; x2 y2] + + Usage: + bval=SegIntersect(seg1,seg2) + """ + + bval=1 + + xA=seg1[0,0] + yA=seg1[0,1] + xB=seg1[1,0] + yB=seg1[1,1] + xC=seg2[0,0] + yC=seg2[0,1] + xD=seg2[1,0] + yD=seg2[1,1] + + O2A=numpy.array([xA,yA])-numpy.array([xD/2.+xC/2.,yD/2.+yC/2.]) + O2B=numpy.array([xB,yB])-numpy.array([xD/2.+xC/2.,yD/2.+yC/2.]) + O1C=numpy.array([xC,yC])-numpy.array([xA/2.+xB/2.,yB/2.+yA/2.]) + O1D=numpy.array([xD,yD])-numpy.array([xA/2.+xB/2.,yB/2.+yA/2.]) + + n1=numpy.array([yA-yB,xB-xA]) #normal vector to segA + n2=numpy.array([yC-yD,xD-xC]) #normal vector to segB + + test1=numpy.dot(n2,O2A) + test2=numpy.dot(n2,O2B) + + if test1*test2>0: + bval=0 + return bval + + test3=numpy.dot(n1,O1C) + test4=numpy.dot(n1,O1D) + + if test3*test4>0: + bval=0 + return bval + + #if colinear + if test1*test2==0 and test3*test4==0 and numpy.linalg.det(numpy.hstack((n1.reshape((-1,1)),n2.reshape(-1,1))))==0: + + #projection on the axis O1O2 + O2O1=numpy.array([xA/2.+xB/2.,yB/2.+yA/2.])-numpy.array([xD/2.+xC/2.,yD/2.+yC/2.]) + O1A=numpy.dot(O2O1,(O2A-O2O1)) + O1B=numpy.dot(O2O1,(O2B-O2O1)) + O1C=numpy.dot(O2O1,O1C) + O1D=numpy.dot(O2O1,O1D) + + #test if one point is included in the other segment (->bval=1) + if (O1C-O1A)*(O1D-O1A)<0: + bval=1 + return bval + if (O1C-O1B)*(O1D-O1B)<0: + bval=1 + return bval + if (O1A-O1C)*(O1B-O1C)<0: + bval=1 + return bval + if (O1A-O1D)*(O1B-O1D)<0: + bval=1 + return bval + + #test if the 2 segments have the same middle (->bval=1) + if O2O1==0: + bval=1 + return bval + + #else + bval=0 + return bval + + return bval + Index: ../trunk-jpl/src/py3/geometry/slope.py =================================================================== --- ../trunk-jpl/src/py3/geometry/slope.py (revision 0) +++ ../trunk-jpl/src/py3/geometry/slope.py (revision 19895) @@ -0,0 +1,46 @@ +import numpy as npy +from GetNodalFunctionsCoeff import GetNodalFunctionsCoeff + +def slope(md,*args): + """ + SLOPE - compute the surface slope + + Usage: + sx,sy,s=slope(md) + sx,sy,s=slope(md,md.results.TransientSolution(1).Surface) + """ + + #load some variables (it is much faster if the variables are loaded from md once for all) + if md.mesh.dimension()==2: + numberofelements=md.mesh.numberofelements + numberofnodes=md.mesh.numberofvertices + index=md.mesh.elements + x=md.mesh.x ; y=md.mesh.y + else: + 
Index: ../trunk-jpl/src/py3/geometry/slope.py
===================================================================
--- ../trunk-jpl/src/py3/geometry/slope.py (revision 0)
+++ ../trunk-jpl/src/py3/geometry/slope.py (revision 19895)
@@ -0,0 +1,46 @@
+import numpy as npy
+from GetNodalFunctionsCoeff import GetNodalFunctionsCoeff
+from project3d import project3d    #needed by the 3d branch below
+
+def slope(md,*args):
+    """
+    SLOPE - compute the surface slope
+
+    Usage:
+        sx,sy,s=slope(md)
+        sx,sy,s=slope(md,md.results.TransientSolution[0].Surface)
+    """
+
+    #load some variables (it is much faster if the variables are loaded from md once for all)
+    if md.mesh.dimension()==2:
+        numberofelements=md.mesh.numberofelements
+        numberofnodes=md.mesh.numberofvertices
+        index=md.mesh.elements
+        x=md.mesh.x ; y=md.mesh.y
+    else:
+        numberofelements=md.mesh.numberofelements2d
+        numberofnodes=md.mesh.numberofvertices2d
+        index=md.mesh.elements2d
+        x=md.mesh.x2d ; y=md.mesh.y2d
+
+    if len(args)==0:
+        surf=md.geometry.surface
+    elif len(args)==1:
+        surf=args[0]
+    else:
+        raise RuntimeError("slope.py usage error")
+
+    #compute nodal functions coefficients N(x,y)=alpha x + beta y + gamma
+    alpha,beta=GetNodalFunctionsCoeff(index,x,y)[0:2]
+
+    #the gradient of a P1 field is constant per element: sum the nodal values
+    #weighted by the nodal function coefficients
+    summation=npy.array([[1],[1],[1]])
+    sx=npy.dot(surf[index-1]*alpha,summation).reshape(-1,)
+    sy=npy.dot(surf[index-1]*beta,summation).reshape(-1,)
+
+    s=npy.sqrt(sx**2+sy**2)
+
+    if md.mesh.dimension()==3:
+        sx=project3d(md,'vector',sx,'type','element')
+        sy=project3d(md,'vector',sy,'type','element')
+        s=npy.sqrt(sx**2+sy**2)
+
+    return (sx,sy,s)
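The per-element gradient used by slope is just the nodal-function coefficients contracted with the nodal values. A standalone check on one reference triangle, independent of any md object (a sketch):

    import numpy
    from GetNodalFunctionsCoeff import GetNodalFunctionsCoeff

    # triangle (0,0),(1,0),(0,1) carrying the field f=2x+3y
    index=numpy.array([[1,2,3]])
    x=numpy.array([0.,1.,0.])
    y=numpy.array([0.,0.,1.])
    f=2.*x+3.*y
    alpha,beta,gamma=GetNodalFunctionsCoeff(index,x,y)
    print(numpy.sum(f[index-1]*alpha,axis=1))   # [2.] = df/dx
    print(numpy.sum(f[index-1]*beta ,axis=1))   # [3.] = df/dy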
Index: ../trunk-jpl/src/py3/geometry/FlagElements.py
===================================================================
--- ../trunk-jpl/src/py3/geometry/FlagElements.py (revision 0)
+++ ../trunk-jpl/src/py3/geometry/FlagElements.py (revision 19895)
@@ -0,0 +1,67 @@
+import numpy
+import os
+#from basinzoom import basinzoom
+from ContourToMesh import ContourToMesh
+import MatlabFuncs as m
+import PythonFuncs as p
+
+def FlagElements(md,region):
+    """
+    FLAGELEMENTS - flag the elements in a region
+
+    The region can be given with an exp file, a list of elements or vertices
+
+    Usage:
+        flag=FlagElements(md,region);
+
+    Example:
+        flag=FlagElements(md,'all');
+        flag=FlagElements(md,'');
+        flag=FlagElements(md,'Domain.exp');
+        flag=FlagElements(md,'~Domain.exp');
+    """
+
+    if isinstance(region,str):
+        if not region:
+            flag=numpy.zeros(md.mesh.numberofelements,bool)
+            invert=0
+        elif m.strcmpi(region,'all'):
+            flag=numpy.ones(md.mesh.numberofelements,bool)
+            invert=0
+        else:
+            #make sure that we actually don't want the elements outside the domain outline!
+            if m.strcmpi(region[0],'~'):
+                region=region[1:]
+                invert=1
+            else:
+                invert=0
+
+            #does the region domain outline exist or do we have to look for xlim,ylim in basinzoom?
+            if not os.path.exists(region):
+                if len(region)>3 and m.strcmp(region[-4:],'.exp'):
+                    raise IOError("FlagElements error message: file '%s' not found!" % region)
+                raise RuntimeError("FlagElements.py calling basinzoom.py is not complete.")
+                xlim,ylim=basinzoom('basin',region)
+                flag_nodes=p.logical_and_n(md.mesh.x<xlim[1],md.mesh.x>xlim[0],md.mesh.y<ylim[1],md.mesh.y>ylim[0])
+                flag=numpy.prod(flag_nodes[md.mesh.elements-1],axis=1).astype(bool)
+            else:
+                #ok, flag elements
+                [flag,dum]=ContourToMesh(md.mesh.elements[:,0:3].copy(),md.mesh.x,md.mesh.y,region,'element',1)
+                flag=flag.astype(bool)
+
+            if invert:
+                flag=numpy.logical_not(flag)
+
+    elif isinstance(region,numpy.ndarray) or isinstance(region,bool):
+        if numpy.size(region,0)==md.mesh.numberofelements:
+            flag=region
+        elif numpy.size(region,0)==md.mesh.numberofvertices:
+            flag=(numpy.sum(region[md.mesh.elements-1]>0,axis=1)==numpy.size(md.mesh.elements,1))
+        else:
+            raise TypeError("Flaglist for region must be of same size as number of elements in model.")
+
+    else:
+        raise TypeError("Invalid region option")
+
+    return flag
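Typical use of the '~' inversion, sketched under the assumption that md is a meshed model and Basin.exp a closed Argus contour (hypothetical names):

    inside =FlagElements(md,'Basin.exp')
    outside=FlagElements(md,'~Basin.exp')
    assert (inside ^ outside).all()    # every element lands in exactly one set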
Index: ../trunk-jpl/src/py3/exp/expread.py
===================================================================
--- ../trunk-jpl/src/py3/exp/expread.py (revision 0)
+++ ../trunk-jpl/src/py3/exp/expread.py (revision 19895)
@@ -0,0 +1,100 @@
+import os.path
+import numpy
+from collections import OrderedDict
+import MatlabFuncs as m
+
+def expread(filename):
+    """
+    EXPREAD - read a .exp file and build a structure
+
+    This routine reads a .exp file and builds a list of dicts containing the
+    fields x and y corresponding to the coordinates, one for the filename of
+    the exp file, one for the density, one for the nodes, and a field closed
+    to indicate if the domain is closed.
+
+    Usage:
+        contours=expread(filename)
+
+    Example:
+        contours=expread('domainoutline.exp')
+
+    See also EXPDOC, EXPWRITEASVERTICES
+    """
+
+    #some checks
+    if not os.path.exists(filename):
+        raise OSError("expread error message: file '%s' not found!" % filename)
+
+    #initialize number of profiles
+    contours=[]
+
+    #open file
+    fid=open(filename,'r')
+
+    #loop over the number of profiles
+    while True:
+
+        #update number of profiles
+        contour=OrderedDict()
+
+        #Get file name
+        A=fid.readline()
+        while A=='\n':
+            A=fid.readline()
+        if not A:
+            break
+        A=A.split(None,1)
+        if not (len(A) == 2 and m.strcmp(A[0],'##') and m.strncmp(A[1],'Name:',5)):
+            break
+        if len(A[1])>5:
+            contour['name']=A[1][5:-1]
+        else:
+            contour['name']=''
+
+        #Get Icon
+        A=fid.readline().split(None,1)
+        if not (len(A) == 2 and m.strcmp(A[0],'##') and m.strncmp(A[1],'Icon:',5)):
+            break
+
+        #Get Info
+        A=fid.readline().split()
+        if not (len(A) == 4 and m.strcmp(A[0],'#') and m.strcmp(A[1],'Points')):
+            break
+
+        #Get number of nodes and density
+        A=fid.readline().split()
+        contour['nods']=int(A[0])
+        contour['density']=float(A[1])
+
+        #Get Info
+        A=fid.readline().split()
+        if not (len(A) == 5 and m.strcmp(A[0],'#') and m.strcmp(A[1],'X') and m.strcmp(A[2],'pos') \
+            and m.strcmp(A[3],'Y') and m.strcmp(A[4],'pos')):
+            break
+
+        #Get Coordinates
+        contour['x']=numpy.empty(contour['nods'])
+        contour['y']=numpy.empty(contour['nods'])
+        for i in range(int(contour['nods'])):
+            A=fid.readline().split()
+            contour['x'][i]=float(A[0])
+            contour['y'][i]=float(A[1])
+
+        #Check if closed
+        if (contour['nods'] > 1) and \
+           (contour['x'][-1] == contour['x'][0]) and \
+           (contour['y'][-1] == contour['y'][0]):
+            contour['closed']=True
+        else:
+            contour['closed']=False
+
+        contours.append(contour)
+
+    #close file
+    fid.close()
+
+    return contours
+
Index: ../trunk-jpl/src/py3/exp/expdisp.py
===================================================================
--- ../trunk-jpl/src/py3/exp/expdisp.py (revision 0)
+++ ../trunk-jpl/src/py3/exp/expdisp.py (revision 19895)
@@ -0,0 +1,27 @@
+from expread import expread
+import numpy as npy
+
+def expdisp(domainoutline,ax,linestyle='--k',linewidth=1,unitmultiplier=1.):
+    '''
+    plot the contents of a domain outline file
+
+    This routine reads in a domain outline file and plots all of the x,y contours
+
+    'ax' is a handle to the current plot axes, onto which the contours are to be drawn
+
+    Usage:
+        expdisp(domainoutline,ax)
+
+    Example:
+        expdisp('domain.exp',plt.gca(),linestyle='--k',linewidth=2,unitmultiplier=1.e3)
+    '''
+
+    domain=expread(domainoutline)
+
+    for i in range(len(domain)):
+        if domain[i]['nods']==1:
+            ax.plot(domain[i]['x']*unitmultiplier,domain[i]['y']*unitmultiplier,'o',mec='k',mfc='r',ms=10)
+        else:
+            x=domain[i]['x']*unitmultiplier    # expread returns numpy arrays, so scale directly
+            y=domain[i]['y']*unitmultiplier
+            ax.plot(x,y,linestyle,linewidth=linewidth)
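For reference, the file layout the parser above expects (reconstructed from the checks in expread; the coordinates are illustrative):

    ## Name:domainoutline.exp
    ## Icon:0
    # Points Count Value
    3 1.000000
    # X pos Y pos
    0.0000000000 0.0000000000
    1000.0000000000 0.0000000000
    0.0000000000 0.0000000000

Since the first and last points coincide and nods>1, expread marks this contour closed.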
Index: ../trunk-jpl/src/py3/exp/expcoarsen.py
===================================================================
--- ../trunk-jpl/src/py3/exp/expcoarsen.py (revision 0)
+++ ../trunk-jpl/src/py3/exp/expcoarsen.py (revision 19895)
@@ -0,0 +1,77 @@
+import os.path
+import numpy as npy
+from collections import OrderedDict
+from expread import expread
+from expwrite import expwrite
+
+def expcoarsen(newfile,oldfile,resolution):
+    """
+    EXPCOARSEN - coarsen an exp contour
+
+    This routine reads an Argus file and removes points with respect to
+    the resolution (in meters) given in input.
+
+    Usage:
+        expcoarsen(newfile,oldfile,resolution)
+
+    Example:
+        expcoarsen('DomainOutline.exp','Antarctica.exp',4000)
+    """
+
+    #Some checks
+    if not os.path.exists(oldfile):
+        raise OSError("expcoarsen error message: file '%s' not found!" % oldfile)
+    if os.path.exists(newfile):
+        choice=input('A file ' + newfile + ' already exists, do you want to modify it? (y/n)')
+        if choice!='y':
+            print('no modification done ... exiting')
+            return 0
+
+    #Get exp oldfile
+    contours=expread(oldfile)
+    newcontours=[]
+
+    for contour in contours:
+
+        numpoints=npy.size(contour['x'])
+
+        j=0
+        x=contour['x']
+        y=contour['y']
+
+        #stop if we have reached end of profile (always keep the last point)
+        while j<numpoints-1:
+
+            #see whether we keep the next point or not
+            distance=npy.sqrt((x[j]-x[j+1])**2+(y[j]-y[j+1])**2)
+            if distance<resolution and j<numpoints-2:
+                #point too close to its predecessor: remove it (but never the last point)
+                x=npy.delete(x,j+1,0)
+                y=npy.delete(y,j+1,0)
+                numpoints=numpoints-1
+            else:
+                #point too far from its predecessor: add intermediary points
+                division=int(npy.floor(distance/resolution)+1)
+                if division>=2:
+                    xi=npy.linspace(x[j],x[j+1],division)
+                    yi=npy.linspace(y[j],y[j+1],division)
+
+                    x=npy.hstack((x[0:j+1],xi[1:-1],x[j+1:]))
+                    y=npy.hstack((y[0:j+1],yi[1:-1],y[j+1:]))
+
+                    #update current point
+                    j=j+1+division-2
+                    numpoints=numpoints+division-2
+                else:
+                    #update current point
+                    j=j+1
+
+        if npy.size(x)>1:
+            #keep the (x,y) contour around
+            newcontour=OrderedDict()
+            newcontour['nods']=npy.size(x)
+            newcontour['density']=contour['density']
+            newcontour['x']=x
+            newcontour['y']=y
+            newcontours.append(newcontour)
+
+    #write output (expwrite expects a dict of coordinate lists)
+    expwrite({'x':[c['x'] for c in newcontours],'y':[c['y'] for c in newcontours]},newfile)
+
Index: ../trunk-jpl/src/py3/exp/expwrite.py
===================================================================
--- ../trunk-jpl/src/py3/exp/expwrite.py (revision 0)
+++ ../trunk-jpl/src/py3/exp/expwrite.py (revision 19895)
@@ -0,0 +1,47 @@
+import numpy
+
+def expwrite(contours,filename):
+    """
+    EXPWRITE - write an Argus file from a dictionary given in input
+
+    This routine writes an Argus file from a dict containing the fields:
+    x and y of the coordinates of the points.
+    The first argument is the dict containing the point coordinates
+    and the second one the file to be written.
+
+    Usage:
+        expwrite(contours,filename)
+
+    Example:
+        expwrite(coordstruct,'domainoutline.exp')
+
+    See also EXPDOC, EXPREAD, EXPWRITEASVERTICES
+    """
+
+    fid=open(filename,'w')
+    for x,y in zip(contours['x'],contours['y']):
+        if len(x)!=len(y):
+            raise RuntimeError("contours x and y coordinates must be of identical size")
+        if 'name' in contours:
+            fid.write("%s%s\n" % ('## Name:',contours['name']))
+        else:
+            fid.write("%s%s\n" % ('## Name:',filename))
+
+        #Add density if it's not there FIXME what is this ever used for?
+        #if 'density' not in contours:
+        #    contours['density']=1
+        density=1
+
+        fid.write("%s\n" % '## Icon:0')
+        fid.write("%s\n" % '# Points Count Value')
+        fid.write("%i %f\n" % (numpy.size(x),density))
+        fid.write("%s\n" % '# X pos Y pos')
+        for xi,yi in zip(x,y):
+            fid.write("%10.10f %10.10f\n" % (xi,yi))
+        fid.write("\n")
+
+    fid.close()
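A minimal write/read cycle for the dict-of-lists convention used above (a sketch; the square contour is made up):

    import numpy
    from expwrite import expwrite
    from expread import expread

    square={'x':[numpy.array([0.,1.,1.,0.,0.])],'y':[numpy.array([0.,0.,1.,1.,0.])]}
    expwrite(square,'square.exp')
    contour=expread('square.exp')[0]
    print(contour['nods'],contour['closed'])    # 5 True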
Index: ../trunk-jpl/src/py3/mesh/GetNodalFunctionsCoeff.py
===================================================================
--- ../trunk-jpl/src/py3/mesh/GetNodalFunctionsCoeff.py (revision 0)
+++ ../trunk-jpl/src/py3/mesh/GetNodalFunctionsCoeff.py (revision 19895)
@@ -0,0 +1,58 @@
+import numpy
+
+def GetNodalFunctionsCoeff(index,x,y):
+    """
+    GETNODALFUNCTIONSCOEFF - compute nodal functions coefficients
+
+    Compute the coefficients alpha, beta and optionally gamma of
+    2d triangular elements. For each element, the nodal functions
+    N_i (i=1,2,3) are defined as:
+        N_i(x,y)=alpha_i*x + beta_i*y + gamma_i
+    so that a P1 field writes f(x,y)=sum(i=1:3) f_i*N_i(x,y)
+
+    Usage:
+        [alpha beta]=GetNodalFunctionsCoeff(index,x,y);
+        [alpha beta gamma]=GetNodalFunctionsCoeff(index,x,y);
+
+    Example:
+        [alpha beta gamma]=GetNodalFunctionsCoeff(md.mesh.elements,md.mesh.x,md.mesh.y);
+    """
+
+    #make columns out of x and y
+    x=x.reshape(-1)
+    y=y.reshape(-1)
+
+    #get nels and nods
+    nels=numpy.size(index,axis=0)
+    nods=numpy.size(x)
+
+    #some checks
+    if numpy.size(y)!=nods:
+        raise TypeError("GetNodalFunctionsCoeff error message: x and y do not have the same length.")
+    if numpy.max(index)>nods:
+        raise TypeError("GetNodalFunctionsCoeff error message: index should not have values above %d." % nods)
+    if numpy.size(index,axis=1)!=3:
+        raise TypeError("GetNodalFunctionsCoeff error message: only 2d meshes supported. index should have 3 columns.")
+
+    #compute nodal functions coefficients N(x,y)=alpha x + beta y + gamma
+    x1=x[index[:,0]-1]
+    x2=x[index[:,1]-1]
+    x3=x[index[:,2]-1]
+    y1=y[index[:,0]-1]
+    y2=y[index[:,1]-1]
+    y3=y[index[:,2]-1]
+    invdet=1./(x1*(y2-y3)-x2*(y1-y3)+x3*(y1-y2))
+
+    #get alpha and beta
+    alpha=numpy.hstack(((invdet*(y2-y3)).reshape(-1,1),(invdet*(y3-y1)).reshape(-1,1),(invdet*(y1-y2)).reshape(-1,1)))
+    beta =numpy.hstack(((invdet*(x3-x2)).reshape(-1,1),(invdet*(x1-x3)).reshape(-1,1),(invdet*(x2-x1)).reshape(-1,1)))
+
+    #get gamma (always computed, returned third)
+    gamma=numpy.hstack(((invdet*(x2*y3-x3*y2)).reshape(-1,1),(invdet*(y1*x3-y3*x1)).reshape(-1,1),(invdet*(x1*y2-x2*y1)).reshape(-1,1)))
+
+    return alpha,beta,gamma
+
Index: ../trunk-jpl/src/py3/mesh/meshconvert.py
===================================================================
--- ../trunk-jpl/src/py3/mesh/meshconvert.py (revision 0)
+++ ../trunk-jpl/src/py3/mesh/meshconvert.py (revision 19895)
@@ -0,0 +1,52 @@
+import numpy
+from collections import OrderedDict
+from BamgConvertMesh import BamgConvertMesh
+from mesh2d import mesh2d
+from bamgmesh import bamgmesh
+from bamggeom import bamggeom
+
+def meshconvert(md,*args):
+    """
+    MESHCONVERT - convert mesh to bamg mesh
+
+    Usage:
+        md=meshconvert(md);
+        md=meshconvert(md,index,x,y);
+    """
+
+    if not len(args)==0 and not len(args)==3:
+        raise TypeError("meshconvert error message: bad usage")
+
+    if not len(args):
+        index = md.mesh.elements
+        x     = md.mesh.x
+        y     = md.mesh.y
+    else:
+        index = args[0]
+        x     = args[1]
+        y     = args[2]
+
+    #call Bamg
+    bamgmesh_out,bamggeom_out=BamgConvertMesh(index,x,y)
+
+    # plug results onto model
+    md.private.bamg             = OrderedDict()
+    md.private.bamg['mesh']     = bamgmesh(bamgmesh_out)
+    md.private.bamg['geometry'] = bamggeom(bamggeom_out)
+    md.mesh                     = mesh2d()
+    md.mesh.x                   = bamgmesh_out['Vertices'][:,0].copy()
+    md.mesh.y                   = bamgmesh_out['Vertices'][:,1].copy()
+    md.mesh.elements            = bamgmesh_out['Triangles'][:,0:3].astype(int)
+    md.mesh.edges               = bamgmesh_out['IssmEdges'].astype(int)
+    md.mesh.segments            = bamgmesh_out['IssmSegments'][:,0:3].astype(int)
+    md.mesh.segmentmarkers      = bamgmesh_out['IssmSegments'][:,3].astype(int)
+
+    #Fill in rest of fields:
+    md.mesh.numberofelements = numpy.size(md.mesh.elements,axis=0)
+    md.mesh.numberofvertices = numpy.size(md.mesh.x)
+    md.mesh.numberofedges    = numpy.size(md.mesh.edges,axis=0)
+    md.mesh.vertexonboundary = numpy.zeros(md.mesh.numberofvertices,bool)
+    md.mesh.vertexonboundary[md.mesh.segments[:,0:2]-1] = True
+
+    return md
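The coefficients returned by GetNodalFunctionsCoeff above satisfy a partition-of-unity identity that makes a cheap test (a sketch): since the three nodal functions sum to 1 everywhere on the element, the alpha and beta columns must sum to 0 and the gamma columns to 1:

    import numpy
    from GetNodalFunctionsCoeff import GetNodalFunctionsCoeff

    index=numpy.array([[1,2,3]])
    x=numpy.array([0.2,3.1,1.0])
    y=numpy.array([0.5,0.7,2.4])
    alpha,beta,gamma=GetNodalFunctionsCoeff(index,x,y)
    print(alpha.sum(),beta.sum(),gamma.sum())    # ~0.0 ~0.0 ~1.0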
Index: ../trunk-jpl/src/py3/mesh/squaremesh.py
===================================================================
--- ../trunk-jpl/src/py3/mesh/squaremesh.py (revision 0)
+++ ../trunk-jpl/src/py3/mesh/squaremesh.py (revision 19895)
@@ -0,0 +1,76 @@
+import numpy
+from NodeConnectivity import NodeConnectivity
+from ElementConnectivity import ElementConnectivity
+from mesh2d import mesh2d
+
+def squaremesh(md,Lx,Ly,nx,ny):
+    """
+    SQUAREMESH - create a structured square mesh
+
+    This script will generate a structured square mesh
+    Lx and Ly are the dimensions of the domain (in meters)
+    nx and ny are the number of nodes in the x and y direction
+    The coordinates x and y returned are in meters.
+
+    Usage:
+        [md]=squaremesh(md,Lx,Ly,nx,ny)
+    """
+
+    #get number of elements and number of nodes
+    nel=(nx-1)*(ny-1)*2
+    nods=nx*ny
+
+    #initialization
+    index=numpy.zeros((nel,3),int)
+    x=numpy.zeros((nx*ny))
+    y=numpy.zeros((nx*ny))
+
+    #create coordinates
+    for n in range(0,nx):
+        for m in range(0,ny):
+            x[n*ny+m]=float(n)
+            y[n*ny+m]=float(m)
+
+    #create index
+    for n in range(0,nx-1):
+        for m in range(0,ny-1):
+            A=n*ny+(m+1)
+            B=A+1
+            C=(n+1)*ny+(m+1)
+            D=C+1
+            index[n*(ny-1)*2+2*m,:]=[A,C,B]
+            index[n*(ny-1)*2+2*(m+1)-1,:]=[B,C,D]
+
+    #Scale x and y
+    x=x/numpy.max(x)*Lx
+    y=y/numpy.max(y)*Ly
+
+    #create segments
+    segments=numpy.zeros((2*(nx-1)+2*(ny-1),3),int)
+    #left edge:
+    segments[0:ny-1,:]=numpy.hstack((numpy.arange(2,ny+1).reshape(-1,1),numpy.arange(1,ny).reshape(-1,1),(2*numpy.arange(1,ny)-1).reshape(-1,1)))
+    #right edge:
+    segments[ny-1:2*(ny-1),:]=numpy.hstack((numpy.arange(ny*(nx-1)+1,nx*ny).reshape(-1,1),numpy.arange(ny*(nx-1)+2,nx*ny+1).reshape(-1,1),2*numpy.arange((ny-1)*(nx-2)+1,(nx-1)*(ny-1)+1).reshape(-1,1)))
+    #front edge:
+    segments[2*(ny-1):2*(ny-1)+(nx-1),:]=numpy.hstack((numpy.arange(2*ny,ny*nx+1,ny).reshape(-1,1),numpy.arange(ny,ny*(nx-1)+1,ny).reshape(-1,1),numpy.arange(2*(ny-1),2*(nx-1)*(ny-1)+1,2*(ny-1)).reshape(-1,1)))
+    #back edge
+    segments[2*(ny-1)+(nx-1):2*(nx-1)+2*(ny-1),:]=numpy.hstack((numpy.arange(1,(nx-2)*ny+2,ny).reshape(-1,1),numpy.arange(ny+1,ny*(nx-1)+2,ny).reshape(-1,1),numpy.arange(1,2*(nx-2)*(ny-1)+2,2*(ny-1)).reshape(-1,1)))
+
+    #plug coordinates and nodes
+    md.mesh=mesh2d()
+    md.mesh.x=x
+    md.mesh.y=y
+    md.mesh.numberofvertices=nods
+    md.mesh.vertexonboundary=numpy.zeros((nods),bool)
+    md.mesh.vertexonboundary[segments[:,0:2]-1]=True
+
+    #plug elements
+    md.mesh.elements=index
+    md.mesh.segments=segments
+    md.mesh.numberofelements=nel
+
+    #Now, build the connectivity tables for this mesh.
+    [md.mesh.vertexconnectivity]=NodeConnectivity(md.mesh.elements,md.mesh.numberofvertices)
+    [md.mesh.elementconnectivity]=ElementConnectivity(md.mesh.elements,md.mesh.vertexconnectivity)
+
+    return md
Index: ../trunk-jpl/src/py3/mesh/ComputeMetric.py
===================================================================
--- ../trunk-jpl/src/py3/mesh/ComputeMetric.py (revision 0)
+++ ../trunk-jpl/src/py3/mesh/ComputeMetric.py (revision 19895)
@@ -0,0 +1,74 @@
+import numpy
+
+def ComputeMetric(hessian,scale,epsilon,hmin,hmax,pos):
+    """
+    COMPUTEMETRIC - compute metric from a Hessian
+
+    Usage:
+        metric=ComputeMetric(hessian,scale,epsilon,hmin,hmax,pos)
+        pos contains the positions where the metric is to be maximized (water elements)
+ + Example: + metric=ComputeMetric(hessian,2/9,10^-1,100,10^5,[]) + """ + + #first, find the eigen values of each line of H=[hessian(i,1) hessian(i,2); hessian(i,2) hessian(i,3)] + a=hessian[:,0] + b=hessian[:,1] + d=hessian[:,2] + lambda1=0.5*((a+d)+numpy.sqrt(4.*b**2+(a-d)**2)) + lambda2=0.5*((a+d)-numpy.sqrt(4.*b**2+(a-d)**2)) + pos1=numpy.nonzero(lambda1==0.)[0] + pos2=numpy.nonzero(lambda2==0.)[0] + pos3=numpy.nonzero(numpy.logical_and(b==0.,lambda1==lambda2))[0] + + #Modify the eigen values to control the shape of the elements + lambda1=numpy.minimum(numpy.maximum(numpy.abs(lambda1)*scale/epsilon,1./hmax**2),1./hmin**2) + lambda2=numpy.minimum(numpy.maximum(numpy.abs(lambda2)*scale/epsilon,1./hmax**2),1./hmin**2) + + #compute eigen vectors + norm1=numpy.sqrt(8.*b**2+2.*(d-a)**2+2.*(d-a)*numpy.sqrt((a-d)**2+4.*b**2)) + v1x=2.*b/norm1 + v1y=((d-a)+numpy.sqrt((a-d)**2+4.*b**2))/norm1 + norm2=numpy.sqrt(8.*b**2+2.*(d-a)**2-2.*(d-a)*numpy.sqrt((a-d)**2+4.*b**2)) + v2x=2.*b/norm2 + v2y=((d-a)-numpy.sqrt((a-d)**2+4.*b**2))/norm2 + + v1x[pos3]=1. + v1y[pos3]=0. + v2x[pos3]=0. + v2y[pos3]=1. + + #Compute new metric (for each node M=V*Lambda*V^-1) + metric=numpy.hstack((((v1x*v2y-v1y*v2x)**(-1)*( lambda1*v2y*v1x-lambda2*v1y*v2x)).reshape(-1,1), \ + ((v1x*v2y-v1y*v2x)**(-1)*( lambda1*v1y*v2y-lambda2*v1y*v2y)).reshape(-1,1), \ + ((v1x*v2y-v1y*v2x)**(-1)*(-lambda1*v2x*v1y+lambda2*v1x*v2y)).reshape(-1,1))) + + #some corrections for 0 eigen values + metric[pos1,:]=numpy.tile(numpy.array([[1./hmax**2,0.,1./hmax**2]]),(numpy.size(pos1),1)) + metric[pos2,:]=numpy.tile(numpy.array([[1./hmax**2,0.,1./hmax**2]]),(numpy.size(pos2),1)) + + #take care of water elements + metric[pos ,:]=numpy.tile(numpy.array([[1./hmax**2,0.,1./hmax**2]]),(numpy.size(pos ),1)) + + #take care of NaNs if any (use Numpy eig in a loop) + pos=numpy.nonzero(numpy.isnan(metric))[0] + if numpy.size(pos): + print((" %i NaN found in the metric. Use Numpy routine..." 
% numpy.size(pos)))
+        for posi in pos:
+            H=numpy.array([[hessian[posi,0],hessian[posi,1]],[hessian[posi,1],hessian[posi,2]]])
+            [v,u]=numpy.linalg.eig(H)
+            v=numpy.diag(v)
+            lambda1=v[0,0]
+            lambda2=v[1,1]
+            v[0,0]=numpy.minimum(numpy.maximum(numpy.abs(lambda1)*scale/epsilon,1./hmax**2),1./hmin**2)
+            v[1,1]=numpy.minimum(numpy.maximum(numpy.abs(lambda2)*scale/epsilon,1./hmax**2),1./hmin**2)
+
+            metricTria=numpy.dot(numpy.dot(u,v),numpy.linalg.inv(u))
+            metric[posi,:]=numpy.array([metricTria[0,0],metricTria[0,1],metricTria[1,1]])
+
+    if numpy.any(numpy.isnan(metric)):
+        raise RuntimeError("ComputeMetric error message: NaN in the metric despite our efforts...")
+
+    return metric
+
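A quick numerical check of the eigen-decomposition path (a sketch): for H=[[2,1],[1,2]] the eigenvalues 3 and 1 already lie inside the bounds chosen below, so the metric returned is H itself:

    import numpy
    from ComputeMetric import ComputeMetric

    hessian=numpy.array([[2.,1.,2.]])    # one row [Hxx,Hxy,Hyy]
    metric=ComputeMetric(hessian,1.,1.,0.1,10.,numpy.array([],int))
    print(metric)    # ~[[2. 1. 2.]]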
Index: ../trunk-jpl/src/py3/mesh/rifts/meshprocessoutsiderifts.py
===================================================================
--- ../trunk-jpl/src/py3/mesh/rifts/meshprocessoutsiderifts.py (revision 0)
+++ ../trunk-jpl/src/py3/mesh/rifts/meshprocessoutsiderifts.py (revision 19895)
@@ -0,0 +1,104 @@
+import numpy
+from ElementsFromEdge import ElementsFromEdge
+from ContourToMesh import ContourToMesh
+import MatlabFuncs as m
+
+def meshprocessoutsiderifts(md,domainoutline):
+    """
+    MESHPROCESSOUTSIDERIFTS - process rifts when they touch the domain outline
+
+    Usage:
+        md=meshprocessoutsiderifts(md,domain)
+
+    """
+
+    #go through rifts, and figure out which ones touch the domain outline
+    for rift in md.rifts.riftstruct:
+
+        #first, flag nodes that belong to the domain outline
+        flags=ContourToMesh(md.mesh.elements,md.mesh.x,md.mesh.y,domainoutline,'node',0)
+
+        tips=rift['tips'].astype(int).reshape(-1)
+        outsidetips=tips[numpy.nonzero(flags[tips-1])[0]]
+
+        #we have found outsidetips, tips that touch the domain outline. go through them
+        for tip in outsidetips:
+
+            #find tip in the segments, take first segment (there should be 2) that holds tip,
+            #and node_connected_to_tip is the other node on this segment:
+            tipindex=numpy.nonzero(rift['segments'][:,0]==tip)[0]
+            if numpy.size(tipindex):
+                tipindex=tipindex[0]
+                node_connected_to_tip=rift['segments'][tipindex,1]
+            else:
+                tipindex=numpy.nonzero(rift['segments'][:,1]==tip)[0]
+                tipindex=tipindex[0]
+                node_connected_to_tip=rift['segments'][tipindex,0]
+
+            #ok, we have the tip node, and the first node connected to it, on the rift. Now,
+            #identify all the elements that are connected to the tip, and that are on the same
+            #side of the rift.
+            A=tip
+            B=node_connected_to_tip
+
+            elements=numpy.empty(0,int)
+
+            while flags[B-1]:    #as long as B does not belong to the domain outline, keep looking.
+                #detect elements on edge A,B:
+                edgeelements=ElementsFromEdge(md.mesh.elements,A,B)
+                #rule out those we already detected
+                already_detected=m.ismember(edgeelements,elements)
+                nextelement=edgeelements[numpy.nonzero(numpy.logical_not(already_detected))[0]]
+                #add new detected element to the list of elements we are looking for.
+                elements=numpy.concatenate((elements,nextelement))
+                #new B:
+                B=md.mesh.elements[nextelement-1,numpy.nonzero(numpy.logical_not(m.ismember(md.mesh.elements[nextelement-1,:],numpy.array([A,B]))))]
+
+            #take the list of elements on one side of the rift that connect to the tip,
+            #and duplicate the tip on them, so as to open the rift to the outside.
+            num=numpy.size(md.mesh.x)+1
+            md.mesh.x=numpy.concatenate((md.mesh.x,[md.mesh.x[tip-1]]))
+            md.mesh.y=numpy.concatenate((md.mesh.y,[md.mesh.y[tip-1]]))
+            md.mesh.numberofvertices=num
+
+            #replace tip in elements
+            newelements=md.mesh.elements[elements-1,:]
+            pos=numpy.nonzero(newelements==tip)
+            newelements[pos]=num
+            md.mesh.elements[elements-1,:]=newelements
+            rift['tips']=numpy.concatenate((rift['tips'].reshape(-1),[num]))
+
+            #deal with segments
+            tipsegments=numpy.nonzero(numpy.logical_or(md.mesh.segments[:,0]==tip,md.mesh.segments[:,1]==tip))[0]
+            for segment_index in tipsegments:
+                pos=numpy.nonzero(md.mesh.segments[segment_index,0:2]!=tip)[0]
+                other_node=md.mesh.segments[segment_index,pos]
+                if not isconnected(md.mesh.elements,other_node,tip):
+                    pos=numpy.nonzero(md.mesh.segments[segment_index,0:2]==tip)[0]
+                    md.mesh.segments[segment_index,pos]=num
+
+    #Fill in rest of fields:
+    md.mesh.numberofelements=numpy.size(md.mesh.elements,axis=0)
+    md.mesh.numberofvertices=numpy.size(md.mesh.x)
+    md.mesh.vertexonboundary=numpy.zeros(numpy.size(md.mesh.x),bool)
+    md.mesh.vertexonboundary[md.mesh.segments[:,0:2]-1]=True
+    md.rifts.numrifts=len(md.rifts.riftstruct)
+
+    return md
+
+def isconnected(elements,A,B): # {{{
+    """
+    ISCONNECTED: are two nodes connected by a triangulation?
+
+    Usage: flag=isconnected(elements,A,B)
+
+    """
+
+    elements=ElementsFromEdge(elements,A,B)
+    if not numpy.size(elements):
+        flag=0
+    else:
+        flag=1
+
+    return flag
+    # }}}
+
Index: ../trunk-jpl/src/py3/mesh/rifts/meshprocessrifts.py
===================================================================
--- ../trunk-jpl/src/py3/mesh/rifts/meshprocessrifts.py (revision 0)
+++ ../trunk-jpl/src/py3/mesh/rifts/meshprocessrifts.py (revision 19895)
@@ -0,0 +1,64 @@
+import numpy
+from TriMeshProcessRifts import TriMeshProcessRifts
+from ContourToMesh import ContourToMesh
+from meshprocessoutsiderifts import meshprocessoutsiderifts
+from GetAreas import GetAreas
+
+def meshprocessrifts(md,domainoutline):
+    """
+    MESHPROCESSRIFTS - process mesh when rifts are present
+
+    split rifts inside mesh (rifts are defined by presence of
+    segments inside the domain outline)
+    if domain outline is provided, check for rifts that could touch it, and open them up.
+
+    Usage:
+        md=meshprocessrifts(md,domainoutline)
+
+    Ex:
+        md=meshprocessrifts(md,'DomainOutline.exp');
+
+    """
+
+    #Call MEX file
+    [md.mesh.elements,md.mesh.x,md.mesh.y,md.mesh.segments,md.mesh.segmentmarkers,md.rifts.riftstruct]=TriMeshProcessRifts(md.mesh.elements,md.mesh.x,md.mesh.y,md.mesh.segments,md.mesh.segmentmarkers)
+    md.mesh.elements=md.mesh.elements.astype(int)
+    md.mesh.x=md.mesh.x.reshape(-1)
+    md.mesh.y=md.mesh.y.reshape(-1)
+    md.mesh.segments=md.mesh.segments.astype(int)
+    md.mesh.segmentmarkers=md.mesh.segmentmarkers.astype(int)
+    if not isinstance(md.rifts.riftstruct,list) or not md.rifts.riftstruct:
+        raise RuntimeError("TriMeshProcessRifts did not find any rift")
+
+    #Fill in rest of fields:
+    numrifts=len(md.rifts.riftstruct)
+    md.mesh.numberofelements=numpy.size(md.mesh.elements,axis=0)
+    md.mesh.numberofvertices=numpy.size(md.mesh.x)
+    md.mesh.vertexonboundary=numpy.zeros(numpy.size(md.mesh.x),bool)
+    md.mesh.vertexonboundary[md.mesh.segments[:,0:2]-1]=True
+
+    #get coordinates of rift tips
+    for rift in md.rifts.riftstruct:
+        rift['tip1coordinates']=numpy.hstack((md.mesh.x[rift['tips'][0,0].astype(int)-1].reshape(-1,1),md.mesh.y[rift['tips'][0,0].astype(int)-1].reshape(-1,1)))
+        rift['tip2coordinates']=numpy.hstack((md.mesh.x[rift['tips'][0,1].astype(int)-1].reshape(-1,1),md.mesh.y[rift['tips'][0,1].astype(int)-1].reshape(-1,1)))
+
+    #In case we have rifts that open up the domain outline, we need to open them:
+    [flags,dum]=ContourToMesh(md.mesh.elements,md.mesh.x,md.mesh.y,domainoutline,'node',0)
+    found=0
+    for rift in md.rifts.riftstruct:
+        if flags[rift['tips'][0,0].astype(int)-1]==0:
+            found=1
+            break
+        if flags[rift['tips'][0,1].astype(int)-1]==0:
+            found=1
+            break
+    if found:
+        md=meshprocessoutsiderifts(md,domainoutline)
+
+    #reorient elements with negative (clockwise) signed area:
+    aires=GetAreas(md.mesh.elements,md.mesh.x,md.mesh.y)
+    pos=numpy.nonzero(aires<0)[0]
+    md.mesh.elements[pos,:]=numpy.hstack((md.mesh.elements[pos,1].reshape(-1,1),md.mesh.elements[pos,0].reshape(-1,1),md.mesh.elements[pos,2].reshape(-1,1)))
+
+    return md
+
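The reorientation step at the end of meshprocessrifts relies on GetAreas returning signed areas; a standalone sketch of the same fix:

    import numpy
    from GetAreas import GetAreas

    elements=numpy.array([[1,3,2]])              # clockwise ordering -> negative area
    x=numpy.array([0.,1.,0.])
    y=numpy.array([0.,0.,1.])
    aires=GetAreas(elements,x,y)                 # -> [-0.5]
    pos=numpy.nonzero(aires<0)[0]
    elements[pos,:]=elements[pos][:,[1,0,2]]     # swap the first two nodes
    print(GetAreas(elements,x,y))                # -> [0.5]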
Index: ../trunk-jpl/src/py3/mesh/roundmesh.py
===================================================================
--- ../trunk-jpl/src/py3/mesh/roundmesh.py (revision 0)
+++ ../trunk-jpl/src/py3/mesh/roundmesh.py (revision 19895)
@@ -0,0 +1,61 @@
+import numpy
+import os
+from collections import OrderedDict
+from expwrite import expwrite
+from triangle import triangle
+
+def roundmesh(md,radius,resolution):
+    """
+    ROUNDMESH - create an unstructured round mesh
+
+    This script will generate an unstructured round mesh
+    - radius     : specifies the radius of the circle in meters
+    - resolution : specifies the resolution in meters
+
+    Usage:
+        md=roundmesh(md,radius,resolution)
+    """
+
+    #First we have to create the domain outline
+
+    #Get number of points on the circle (an integer count, as numpy.ones and numpy.linspace require)
+    pointsonedge=int(numpy.floor((2.*numpy.pi*radius) / resolution))
+
+    #Calculate the cartesian coordinates of the points
+    x_list=numpy.ones(pointsonedge)
+    y_list=numpy.ones(pointsonedge)
+    theta=numpy.linspace(0.,2.*numpy.pi,num=pointsonedge,endpoint=False)
+    x_list=roundsigfig(radius*x_list*numpy.cos(theta),12)
+    y_list=roundsigfig(radius*y_list*numpy.sin(theta),12)
+    A=OrderedDict()
+    A['x']=[x_list]
+    A['y']=[y_list]
+    A['density']=1.
+
+    expwrite(A,'RoundDomainOutline.exp')
+
+    #Call the mesher
+    md=triangle(md,'RoundDomainOutline.exp',resolution)
+    #md=bamg(md,'domain','RoundDomainOutline.exp','hmin',resolution)
+
+    #move the closest node to the center
+    pos=numpy.argmin(md.mesh.x**2+md.mesh.y**2)
+    md.mesh.x[pos]=0.
+    md.mesh.y[pos]=0.
+
+    #delete domain
+    os.remove('RoundDomainOutline.exp')
+
+    return md
+
+def roundsigfig(x,n):
+
+    #round keeping n significant figures; zero out NaNs coming from log10(0)
+    digits=numpy.ceil(numpy.log10(numpy.abs(x)))
+    x=x/10.**digits
+    x=numpy.round(x,decimals=n)
+    x=x*10.**digits
+
+    pos=numpy.nonzero(numpy.isnan(x))
+    x[pos]=0.
+
+    return x
+
Index: ../trunk-jpl/src/py3/mesh/ElementsFromEdge.py
===================================================================
--- ../trunk-jpl/src/py3/mesh/ElementsFromEdge.py (revision 0)
+++ ../trunk-jpl/src/py3/mesh/ElementsFromEdge.py (revision 19895)
@@ -0,0 +1,24 @@
+import numpy
+import PythonFuncs as p
+
+def ElementsFromEdge(elements,A,B):
+    """
+    ELEMENTSFROMEDGE: find elements connected to one edge defined by nodes A and B
+
+    Usage: edgeelements=ElementsFromEdge(elements,A,B)
+
+    Eg:    edgeelements=ElementsFromEdge(md.mesh.elements,tip1,tip2)
+
+    """
+
+    edgeelements=numpy.nonzero(\
+        p.logical_or_n(numpy.logical_and(elements[:,0]==A,elements[:,1]==B), \
+                       numpy.logical_and(elements[:,0]==A,elements[:,2]==B), \
+                       numpy.logical_and(elements[:,1]==A,elements[:,2]==B), \
+                       numpy.logical_and(elements[:,1]==A,elements[:,0]==B), \
+                       numpy.logical_and(elements[:,2]==A,elements[:,0]==B), \
+                       numpy.logical_and(elements[:,2]==A,elements[:,1]==B), \
+                      ))[0]+1
+
+    return edgeelements
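A tiny check of the edge lookup (a sketch): two triangles sharing edge (2,3) are both reported, with 1-based element numbers:

    import numpy
    from ElementsFromEdge import ElementsFromEdge

    elements=numpy.array([[1,2,3],[2,4,3]])
    print(ElementsFromEdge(elements,2,3))    # [1 2]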
Index: ../trunk-jpl/src/py3/mesh/triangle.py
===================================================================
--- ../trunk-jpl/src/py3/mesh/triangle.py (revision 0)
+++ ../trunk-jpl/src/py3/mesh/triangle.py (revision 19895)
@@ -0,0 +1,62 @@
+import numpy
+from mesh2d import mesh2d
+from TriMesh import TriMesh
+from NodeConnectivity import NodeConnectivity
+from ElementConnectivity import ElementConnectivity
+import MatlabFuncs as m
+
+def triangle(md,domainname,*args):
+    """
+    TRIANGLE - create model mesh using the triangle package
+
+    This routine creates a model mesh using TriMesh and a domain outline, to within a certain resolution.
+    md is a model object, domainname is the name of an Argus domain outline file,
+    and resolution is a characteristic length for the mesh (same unit as the domain outline
+    unit). riftname is an optional argument (Argus domain outline) describing rifts.
+
+    Usage:
+        md=triangle(md,domainname,resolution)
+    or  md=triangle(md,domainname,riftname,resolution)
+
+    Examples:
+        md=triangle(md,'DomainOutline.exp',1000);
+        md=triangle(md,'DomainOutline.exp','Rifts.exp',1000);
+    """
+
+    #Figure out a characteristic area. Resolution is a node oriented concept (ex a 1000m resolution node would
+    #be made of 1000*1000 area squares).
+
+    if len(args)==1:
+        resolution=args[0]
+        riftname=''
+    elif len(args)==2:
+        riftname=args[0]
+        resolution=args[1]
+    else:
+        raise TypeError("triangle error message: bad usage")
+
+    #Check that mesh was not already run, and warn user:
+    if md.mesh.numberofelements:
+        choice = input('This model already has a mesh. Are you sure you want to go ahead? (y/n)')
+        if not m.strcmp(choice,'y'):
+            print('no meshing done ... exiting')
+            return None
+
+    area = resolution**2
+
+    #Mesh using TriMesh
+    md.mesh=mesh2d()
+    [md.mesh.elements,md.mesh.x,md.mesh.y,md.mesh.segments,md.mesh.segmentmarkers]=TriMesh(domainname,riftname,area)
+    md.mesh.elements=md.mesh.elements.astype(int)
+    md.mesh.segments=md.mesh.segments.astype(int)
+    md.mesh.segmentmarkers=md.mesh.segmentmarkers.astype(int)
+
+    #Fill in rest of fields:
+    md.mesh.numberofelements = numpy.size(md.mesh.elements,axis=0)
+    md.mesh.numberofvertices = numpy.size(md.mesh.x)
+    md.mesh.vertexonboundary = numpy.zeros(md.mesh.numberofvertices,bool)
+    md.mesh.vertexonboundary[md.mesh.segments[:,0:2]-1] = True
+
+    #Now, build the connectivity tables for this mesh.
+    [md.mesh.vertexconnectivity] = NodeConnectivity(md.mesh.elements, md.mesh.numberofvertices)
+    [md.mesh.elementconnectivity] = ElementConnectivity(md.mesh.elements, md.mesh.vertexconnectivity)
+
+    return md
Index: ../trunk-jpl/src/py3/mesh/bamg.py
===================================================================
--- ../trunk-jpl/src/py3/mesh/bamg.py (revision 0)
+++ ../trunk-jpl/src/py3/mesh/bamg.py (revision 19895)
@@ -0,0 +1,486 @@
+import os.path
+import numpy
+from mesh2d import mesh2d
+from collections import OrderedDict
+from pairoptions import pairoptions
+from bamggeom import bamggeom
+from bamgmesh import bamgmesh
+from expread import expread
+from expwrite import expwrite
+from SegIntersect import SegIntersect
+import MatlabFuncs as m
+from BamgMesher import BamgMesher
+from ContourToNodes import ContourToNodes
+
+def bamg(md,**kwargs):
+    """
+    BAMG - mesh generation
+
+    Available options (for more details see ISSM website http://issm.jpl.nasa.gov/):
+
+    - domain :            followed by an ARGUS file that prescribes the domain outline
+    - hmin :              minimum edge length (default is 10^-100)
+    - hmax :              maximum edge length (default is 10^100)
+    - hVertices :         imposed edge length for each vertex (geometry or mesh)
+    - hminVertices :      minimum edge length for each vertex (mesh)
+    - hmaxVertices :      maximum edge length for each vertex (mesh)
+
+    - anisomax :          maximum ratio between the smallest and largest edges (default is 10^30)
+    - coeff :             coefficient applied to the metric (2-> twice as many elements, default is 1)
+    - cutoff :            scalar used to compute the metric when metric type 2 or 3 are applied
+    - err :               error used to generate the metric from a field
+    - errg :              geometric error (default is 0.1)
+    - field :             field of the model that will be used to compute the metric
+                          to apply several fields, use one column per field
+    - gradation :         maximum ratio between two adjacent edges
+    - Hessiantype :       0 -> use double P2 projection (default)
+                          1 -> use Green formula
+    - KeepVertices :      try to keep initial vertices when adaptation is done on an existing mesh (default 1)
+    - MaxCornerAngle :    maximum angle of corners in degree (default is 10)
+    - maxnbv :            maximum number of vertices used to allocate memory (default is 10^6)
+    - maxsubdiv :         maximum subdivision of existing elements (default is 10)
+    - metric :            matrix (numberofnodes x 3) used as a metric
+    - Metrictype :        1 -> absolute error          c/(err coeff^2) * Abs(H)        (default)
+                          2 -> relative error          c/(err coeff^2) * Abs(H)/max(s,cutoff*max(s))
+                          3 -> rescaled absolute error c/(err coeff^2) * Abs(H)/(smax-smin)
+    - nbjacoby :          correction used by Hessiantype=1 (default is 1)
+    - nbsmooth :          number of metric smoothing procedure (default is 3)
+    - omega :             relaxation parameter of the smoothing procedure (default is 1.8)
+    - power :             power applied to the metric (default is 1)
+    - splitcorners :      split triangles which have 3 vertices on the outline (default is 1)
+    - geometricalmetric : take the geometry into account to generate the metric (default is 0)
+    - verbose :           level of verbosity (default is 1)
+
+    - rifts :             followed by an ARGUS file that prescribes the rifts
+    - toltip :            tolerance to move tip on an existing point of the domain outline
+    - tracks :            followed by an ARGUS file that prescribes the tracks that the mesh will stick to
+    - RequiredVertices :  mesh vertices that are required. [x,y,ref]; ref is optional
+    - tol :               if the distance between 2 points of the domain outline is less than tol, they
+                          will be merged
+
+    Examples:
+        md=bamg(md,'domain','DomainOutline.exp','hmax',3000);
+        md=bamg(md,'field',[md.inversion.vel_obs md.geometry.thickness],'hmax',20000,'hmin',1000);
+        md=bamg(md,'metric',A,'hmin',1000,'hmax',20000,'gradation',3,'anisomax',1);
+    """
+
+    #process options
+    options=pairoptions(**kwargs)
+#    options=deleteduplicates(options,1);
+
+    #initialize the structures required as input of Bamg
+    bamg_options=OrderedDict()
+    bamg_geometry=bamggeom()
+    bamg_mesh=bamgmesh()
+
+    # Bamg Geometry parameters {{{
+    if options.exist('domain'):
+
+        #Check that file exists
+        domainfile=options.getfieldvalue('domain')
+        if not os.path.exists(domainfile):
+            raise IOError("bamg error message: file '%s' not found" % domainfile)
+        domain=expread(domainfile)
+
+        #Build geometry
+        count=0
+        for i,domaini in enumerate(domain):
+
+            #Check that the domain is closed
+            if (domaini['x'][0]!=domaini['x'][-1] or domaini['y'][0]!=domaini['y'][-1]):
+                raise RuntimeError("bamg error message: all contours provided in ''domain'' should be closed")
+
+            #Check that all holes are INSIDE the principal domain outline
+            if i:
+                flags=ContourToNodes(domaini['x'],domaini['y'],domainfile,0)
+                if numpy.any(numpy.logical_not(flags)):
+                    raise RuntimeError("bamg error message: All holes should be strictly inside the principal domain")
+
+            #Add all points to bamg_geometry
+            nods=domaini['nods']-1    #the domain is closed: last point equals the first, so drop it
+            bamg_geometry.Vertices=numpy.vstack((bamg_geometry.Vertices,numpy.hstack((domaini['x'][0:nods].reshape(-1,1),domaini['y'][0:nods].reshape(-1,1),numpy.ones((nods,1))))))
+            bamg_geometry.Edges   =numpy.vstack((bamg_geometry.Edges,   numpy.hstack((numpy.arange(count+1,count+nods+1).reshape(-1,1),numpy.hstack((numpy.arange(count+2,count+nods+1),count+1)).reshape(-1,1),1.*numpy.ones((nods,1))))))
+            if i:
+                bamg_geometry.SubDomains=numpy.vstack((bamg_geometry.SubDomains,[2,count+1,1,1]))
+
+            #update counter
+            count+=nods
+
+        #take care of rifts
+        if options.exist('rifts'):
+
+            #Check that file exists
+            riftfile=options.getfieldvalue('rifts')
+            if not os.path.exists(riftfile):
+                raise IOError("bamg error message: file '%s' not found" % riftfile)
+            rift=expread(riftfile)
+
+            for i,rifti in enumerate(rift):
+
+                #detect whether all points of the rift are inside the domain
+                flags=ContourToNodes(rifti['x'],rifti['y'],domain[0],0)
+                if numpy.all(numpy.logical_not(flags)):
+                    raise RuntimeError("one rift has all its points outside of the domain outline")
+
+                elif numpy.any(numpy.logical_not(flags)):
+                    #We have LOTS of work to do
+                    print("Rift tip outside of or on the domain has been detected and is being processed...")
+
+                    #check that only one point is outside (for now)
+                    if numpy.sum(numpy.logical_not(flags).astype(int))!=1:
+                        raise RuntimeError("bamg error message: only one point outside of the domain is supported yet")
+
+                    #Move tip outside to the first position
+                    if not flags[0]:
+                        #OK, first point is outside (do nothing)
+                        pass
+                    elif not flags[-1]:
+                        rifti['x']=numpy.flipud(rifti['x'])
+                        rifti['y']=numpy.flipud(rifti['y'])
+                    else:
+                        raise RuntimeError("bamg error message: only a rift tip can be outside of the domain")
+
+                    #Get coordinate of intersection point
+                    x1=rifti['x'][0]
+                    y1=rifti['y'][0]
+                    x2=rifti['x'][1]
+                    y2=rifti['y'][1]
+                    for j in range(0,numpy.size(domain[0]['x'])-1):
+                        if SegIntersect(numpy.array([[x1,y1],[x2,y2]]),numpy.array([[domain[0]['x'][j],domain[0]['y'][j]],[domain[0]['x'][j+1],domain[0]['y'][j+1]]])):
+
+                            #Get position of the two nodes of the edge in domain
+                            i1=j
+                            i2=j+1
+
+                            #rift is crossing edge [i1 i2] of the domain
+                            #Get coordinate of intersection point (http://mathworld.wolfram.com/Line-LineIntersection.html)
+                            x3=domain[0]['x'][i1]
+                            y3=domain[0]['y'][i1]
+                            x4=domain[0]['x'][i2]
+                            y4=domain[0]['y'][i2]
+#                           x=det([det([x1 y1; x2 y2]) x1-x2;det([x3 y3; x4 y4]) x3-x4])/det([x1-x2 y1-y2;x3-x4 y3-y4]);
+#                           y=det([det([x1 y1; x2 y2]) y1-y2;det([x3 y3; x4 y4]) y3-y4])/det([x1-x2 y1-y2;x3-x4 y3-y4]);
+                            x=numpy.linalg.det(numpy.array([[numpy.linalg.det(numpy.array([[x1,y1],[x2,y2]])),x1-x2],[numpy.linalg.det(numpy.array([[x3,y3],[x4,y4]])),x3-x4]]))/numpy.linalg.det(numpy.array([[x1-x2,y1-y2],[x3-x4,y3-y4]]))
+                            y=numpy.linalg.det(numpy.array([[numpy.linalg.det(numpy.array([[x1,y1],[x2,y2]])),y1-y2],[numpy.linalg.det(numpy.array([[x3,y3],[x4,y4]])),y3-y4]]))/numpy.linalg.det(numpy.array([[x1-x2,y1-y2],[x3-x4,y3-y4]]))
+
+                            segdis=numpy.sqrt((x4-x3)**2+(y4-y3)**2)
+                            tipdis=numpy.array([numpy.sqrt((x-x3)**2+(y-y3)**2),numpy.sqrt((x-x4)**2+(y-y4)**2)])
+
+                            if numpy.min(tipdis)/segdis < options.getfieldvalue('toltip',0):
+                                print("moving tip-domain intersection point")
+
+                                #Get position of the closer point
+                                if tipdis[0]>tipdis[1]:
+                                    pos=i2
+                                else:
+                                    pos=i1
+
+                                #This point is only in Vertices (number pos).
+                                #OK, now we can add our own rift
+                                nods=rifti['nods']-1
+                                bamg_geometry.Vertices=numpy.vstack((bamg_geometry.Vertices,numpy.hstack((rifti['x'][1:].reshape(-1,1),rifti['y'][1:].reshape(-1,1),numpy.ones((nods,1))))))
+                                bamg_geometry.Edges=numpy.vstack((bamg_geometry.Edges,\
+                                    numpy.array([[pos,count+1,(1+i)]]),\
+                                    numpy.hstack((numpy.arange(count+1,count+nods).reshape(-1,1),numpy.arange(count+2,count+nods+1).reshape(-1,1),(1+i)*numpy.ones((nods-1,1))))))
+                                count+=nods
+
+                                break
+
+                            else:
+                                #Add intersection point to Vertices
+                                bamg_geometry.Vertices=numpy.vstack((bamg_geometry.Vertices,numpy.array([[x,y,1]])))
+                                count+=1
+
+                                #Decompose the crossing edge into 2 subedges
+                                pos=numpy.nonzero(numpy.logical_and(bamg_geometry.Edges[:,0]==i1,bamg_geometry.Edges[:,1]==i2))[0]
+                                if not numpy.size(pos):
+                                    raise RuntimeError("bamg error message: a problem occurred...")
+                                pos=pos[0]
+                                bamg_geometry.Edges=numpy.vstack((bamg_geometry.Edges[0:pos,:],\
+                                    numpy.array([[bamg_geometry.Edges[pos,0],count                    ,bamg_geometry.Edges[pos,2]]]),\
+                                    numpy.array([[count                    ,bamg_geometry.Edges[pos,1],bamg_geometry.Edges[pos,2]]]),\
+                                    bamg_geometry.Edges[pos+1:,:]))
+
+                                #OK, now we can add our own rift
+                                nods=rifti['nods']-1
+                                bamg_geometry.Vertices=numpy.vstack((bamg_geometry.Vertices,numpy.hstack((rifti['x'][1:].reshape(-1,1),rifti['y'][1:].reshape(-1,1),numpy.ones((nods,1))))))
+                                bamg_geometry.Edges=numpy.vstack((bamg_geometry.Edges,\
+                                    numpy.array([[count,count+1,2]]),\
+                                    numpy.hstack((numpy.arange(count+1,count+nods).reshape(-1,1),numpy.arange(count+2,count+nods+1).reshape(-1,1),(1+i)*numpy.ones((nods-1,1))))))
+                                count+=nods
+
+                                break
+
+                else:
+                    nods=rifti['nods']-1
+                    bamg_geometry.Vertices=numpy.vstack((bamg_geometry.Vertices,numpy.hstack((rifti['x'].reshape(-1,1),rifti['y'].reshape(-1,1),numpy.ones((nods+1,1))))))
+                    bamg_geometry.Edges   =numpy.vstack((bamg_geometry.Edges,numpy.hstack((numpy.arange(count+1,count+nods+1).reshape(-1,1),numpy.arange(count+2,count+nods+2).reshape(-1,1),(1+i)*numpy.ones((nods,1))))))
+                    count+=nods+1
+
+        #Deal with tracks
+        if options.exist('tracks'):
+
+            #read tracks
+            track=options.getfieldvalue('tracks')
+            if isinstance(track,str):
+                A=expread(track)
+                track=numpy.hstack((A[0]['x'].reshape(-1,1),A[0]['y'].reshape(-1,1)))
+            else:
+                track=numpy.asarray(track,dtype=float)    #for some reason, it can arrive as single precision
+            if numpy.size(track,axis=1)==2:
+                track=numpy.hstack((track,3.*numpy.ones((numpy.size(track,axis=0),1))))
+
+            #only keep those inside
+            flags=ContourToNodes(track[:,0],track[:,1],domainfile,0)
+            track=track[numpy.nonzero(flags)[0],:]
+
+            #Add all points to bamg_geometry
+            nods=numpy.size(track,axis=0)
+            bamg_geometry.Vertices=numpy.vstack((bamg_geometry.Vertices,track))
+            bamg_geometry.Edges   =numpy.vstack((bamg_geometry.Edges,numpy.hstack((numpy.arange(count+1,count+nods).reshape(-1,1),numpy.arange(count+2,count+nods+1).reshape(-1,1),3.*numpy.ones((nods-1,1))))))
+
+            #update counter
+            count+=nods
+
+        #Deal with vertices that need to be kept by mesher
+        if options.exist('RequiredVertices'):
+
+            #recover RequiredVertices
+            requiredvertices=options.getfieldvalue('RequiredVertices')    #for some reason, it can arrive as single precision
+            if numpy.size(requiredvertices,axis=1)==2:
+                requiredvertices=numpy.hstack((requiredvertices,4.*numpy.ones((numpy.size(requiredvertices,axis=0),1))))
+
+            #only keep those inside
+            flags=ContourToNodes(requiredvertices[:,0],requiredvertices[:,1],domainfile,0)[0]
+            requiredvertices=requiredvertices[numpy.nonzero(flags)[0],:]
+
+            #Add all points to bamg_geometry
+            nods=numpy.size(requiredvertices,axis=0)
bamg_geometry.Vertices=numpy.vstack((bamg_geometry.Vertices,requiredvertices)) + + #update counter + count+=nods + + #process geom + #bamg_geometry=processgeometry(bamg_geometry,options.getfieldvalue('tol',float(nan)),domain[0]) + + elif isinstance(md.private.bamg,dict) and 'geometry' in md.private.bamg: + bamg_geometry=bamggeom(md.private.bamg['geometry'].__dict__) + else: + #do nothing... + pass + #}}} + # Bamg Mesh parameters {{{ + if not options.exist('domain') and md.mesh.numberofvertices and m.strcmp(md.mesh.elementtype(),'Tria'): + + if isinstance(md.private.bamg,dict) and 'mesh' in md.private.bamg: + bamg_mesh=bamgmesh(md.private.bamg['mesh'].__dict__) + else: + bamg_mesh.Vertices=numpy.hstack((md.mesh.x.reshape(-1,1),md.mesh.y.reshape(-1,1),numpy.ones((md.mesh.numberofvertices,1)))) + bamg_mesh.Triangles=numpy.hstack((md.mesh.elements,numpy.ones((md.mesh.numberofelements,1)))) + + if isinstance(md.rifts.riftstruct,dict): + raise TypeError("bamg error message: rifts not supported yet. Do meshprocessrift AFTER bamg") + #}}} + # Bamg Options {{{ + bamg_options['Crack']=options.getfieldvalue('Crack',0) + bamg_options['anisomax']=options.getfieldvalue('anisomax',10.**30) + bamg_options['coeff']=options.getfieldvalue('coeff',1.) + bamg_options['cutoff']=options.getfieldvalue('cutoff',10.**-5) + bamg_options['err']=options.getfieldvalue('err',numpy.array([[0.01]])) + bamg_options['errg']=options.getfieldvalue('errg',0.1) + bamg_options['field']=options.getfieldvalue('field',numpy.empty((0,1))) + bamg_options['gradation']=options.getfieldvalue('gradation',1.5) + bamg_options['Hessiantype']=options.getfieldvalue('Hessiantype',0) + bamg_options['hmin']=options.getfieldvalue('hmin',10.**-100) + bamg_options['hmax']=options.getfieldvalue('hmax',10.**100) + bamg_options['hminVertices']=options.getfieldvalue('hminVertices',numpy.empty((0,1))) + bamg_options['hmaxVertices']=options.getfieldvalue('hmaxVertices',numpy.empty((0,1))) + bamg_options['hVertices']=options.getfieldvalue('hVertices',numpy.empty((0,1))) + bamg_options['KeepVertices']=options.getfieldvalue('KeepVertices',1) + bamg_options['MaxCornerAngle']=options.getfieldvalue('MaxCornerAngle',10.) + bamg_options['maxnbv']=options.getfieldvalue('maxnbv',10**6) + bamg_options['maxsubdiv']=options.getfieldvalue('maxsubdiv',10.) + bamg_options['metric']=options.getfieldvalue('metric',numpy.empty((0,1))) + bamg_options['Metrictype']=options.getfieldvalue('Metrictype',0) + bamg_options['nbjacobi']=options.getfieldvalue('nbjacobi',1) + bamg_options['nbsmooth']=options.getfieldvalue('nbsmooth',3) + bamg_options['omega']=options.getfieldvalue('omega',1.8) + bamg_options['power']=options.getfieldvalue('power',1.) 
+ bamg_options['splitcorners']=options.getfieldvalue('splitcorners',1) + bamg_options['geometricalmetric']=options.getfieldvalue('geometricalmetric',0) + bamg_options['random']=options.getfieldvalue('rand',True) + bamg_options['verbose']=options.getfieldvalue('verbose',1) + #}}} + + #call Bamg + [bamgmesh_out,bamggeom_out]=BamgMesher(bamg_mesh.__dict__,bamg_geometry.__dict__,bamg_options) + + # plug results onto model + md.private.bamg=OrderedDict() + md.private.bamg['mesh']=bamgmesh(bamgmesh_out) + md.private.bamg['geometry']=bamggeom(bamggeom_out) + md.mesh = mesh2d() + md.mesh.x=bamgmesh_out['Vertices'][:,0].copy() + md.mesh.y=bamgmesh_out['Vertices'][:,1].copy() + md.mesh.elements=bamgmesh_out['Triangles'][:,0:3].astype(int) + md.mesh.edges=bamgmesh_out['IssmEdges'].astype(int) + md.mesh.segments=bamgmesh_out['IssmSegments'][:,0:3].astype(int) + md.mesh.segmentmarkers=bamgmesh_out['IssmSegments'][:,3].astype(int) + + #Fill in rest of fields: + md.mesh.numberofelements=numpy.size(md.mesh.elements,axis=0) + md.mesh.numberofvertices=numpy.size(md.mesh.x) + md.mesh.numberofedges=numpy.size(md.mesh.edges,axis=0) + md.mesh.vertexonboundary=numpy.zeros(md.mesh.numberofvertices,bool) + md.mesh.vertexonboundary[md.mesh.segments[:,0:2]-1]=True + md.mesh.elementconnectivity=md.private.bamg['mesh'].ElementConnectivity + md.mesh.elementconnectivity[numpy.nonzero(numpy.isnan(md.mesh.elementconnectivity))]=0 + md.mesh.elementconnectivity=md.mesh.elementconnectivity.astype(int) + + #Check for orphan + if numpy.any(numpy.logical_not(numpy.in1d(numpy.arange(1,md.mesh.numberofvertices+1),md.mesh.elements.flat))): + raise RuntimeError("Output mesh has orphans. Decrease MaxCornerAngle to prevent outside points (ex: 0.01)") + + return md + +def processgeometry(geom,tol,outline): # {{{ + + raise RuntimeError("bamg.py/processgeometry is not complete.") + #Deal with edges + print("Checking Edge crossing...") + i=0 + while (ii) + geom.Edges[posedges]=geom.Edges[posedges]-1 + + #update counter + i-=1 + + if num: + print("WARNING: %d points outside the domain outline have been removed" % num) + + """ + %Check point spacing + if ~isnan(tol), + disp('Checking point spacing...'); + i=0; + while (ij); + geom.Edges(posedges)=geom.Edges(posedges)-1; + + %update counter + j=j-1; + + end + end + end + end + %remove empty edges + geom.Edges(find(geom.Edges(:,1)==geom.Edges(:,2)),:)=[]; + """ + return geom +# }}} + Index: ../trunk-jpl/src/py3/mesh/ComputeHessian.py =================================================================== --- ../trunk-jpl/src/py3/mesh/ComputeHessian.py (revision 0) +++ ../trunk-jpl/src/py3/mesh/ComputeHessian.py (revision 19895) @@ -0,0 +1,66 @@ +import numpy +from GetNodalFunctionsCoeff import GetNodalFunctionsCoeff +from GetAreas import GetAreas +import MatlabFuncs as m + +def ComputeHessian(index,x,y,field,type): + """ + COMPUTEHESSIAN - compute hessian matrix from a field + + Compute the hessian matrix of a given field + return the three components Hxx Hxy Hyy + for each element or each node + + Usage: + hessian=ComputeHessian(index,x,y,field,type) + + Example: + hessian=ComputeHessian(md.mesh.elements,md.mesh.x,md.mesh.y,md.inversion.vel_obs,'node') + """ + + #some variables + numberofnodes=numpy.size(x) + numberofelements=numpy.size(index,axis=0) + + #some checks + if numpy.size(field)!=numberofnodes and numpy.size(field)!=numberofelements: + raise TypeError("ComputeHessian error message: the given field size not supported yet") + if not m.strcmpi(type,'node') and not 
m.strcmpi(type,'element'): + raise TypeError("ComputeHessian error message: only 'node' or 'element' type supported yet") + + #initialization + line=index.reshape(-1,order='F') + linesize=3*numberofelements + + #get areas and nodal functions coefficients N(x,y)=alpha x + beta y + gamma + [alpha,beta,dum]=GetNodalFunctionsCoeff(index,x,y) + areas=GetAreas(index,x,y) + + #compute weights that hold the volume of all the element holding the node i + weights=m.sparse(line,numpy.ones((linesize,1)),numpy.tile(areas.reshape(-1,1),(3,1)),numberofnodes,1) + + #compute field on nodes if on elements + if numpy.size(field,axis=0)==numberofelements: + field=m.sparse(line,numpy.ones((linesize,1)),numpy.tile(areas*field,(3,1)),numberofnodes,1)/weights + + #Compute gradient for each element + grad_elx=numpy.sum(field[index-1,0]*alpha,axis=1) + grad_ely=numpy.sum(field[index-1,0]*beta,axis=1) + + #Compute gradient for each node (average of the elements around) + gradx=m.sparse(line,numpy.ones((linesize,1)),numpy.tile((areas*grad_elx).reshape(-1,1),(3,1)),numberofnodes,1) + grady=m.sparse(line,numpy.ones((linesize,1)),numpy.tile((areas*grad_ely).reshape(-1,1),(3,1)),numberofnodes,1) + gradx=gradx/weights + grady=grady/weights + + #Compute hessian for each element + hessian=numpy.hstack((numpy.sum(gradx[index-1,0]*alpha,axis=1).reshape(-1,1),numpy.sum(grady[index-1,0]*alpha,axis=1).reshape(-1,1),numpy.sum(grady[index-1,0]*beta,axis=1).reshape(-1,1))) + + if m.strcmpi(type,'node'): + #Compute Hessian on the nodes (average of the elements around) + hessian=numpy.hstack((m.sparse(line,numpy.ones((linesize,1)),numpy.tile((areas*hessian[:,0]).reshape(-1,1),(3,1)),numberofnodes,1)/weights, \ + m.sparse(line,numpy.ones((linesize,1)),numpy.tile((areas*hessian[:,1]).reshape(-1,1),(3,1)),numberofnodes,1)/weights, \ + m.sparse(line,numpy.ones((linesize,1)),numpy.tile((areas*hessian[:,2]).reshape(-1,1),(3,1)),numberofnodes,1)/weights )) + + return hessian + Index: ../trunk-jpl/src/py3/test.txt =================================================================== --- ../trunk-jpl/src/py3/test.txt (revision 0) +++ ../trunk-jpl/src/py3/test.txt (revision 19895) @@ -0,0 +1 @@ +truc Index: ../trunk-jpl/src/py3/solvers/issmmumpssolver.py =================================================================== --- ../trunk-jpl/src/py3/solvers/issmmumpssolver.py (revision 0) +++ ../trunk-jpl/src/py3/solvers/issmmumpssolver.py (revision 19895) @@ -0,0 +1,29 @@ +import pairoptions + +def issmmumpssolver(**kwargs): + #ISSMSOLVE - return issm solver options + # + # Usage: + # options=issmsolver; + + #retrieve options provided in varargin + arguments=pairoptions.pairoptions(**kwargs) + + options=[['toolkit','issm'],['mat_type','mpidense'],['vec_type','mpi'],['solver_type','mumps']]; + + #now, go through our arguments, and write over default options. 
+    for i in range(len(arguments.list)):
+        arg1=arguments.list[i][0]
+        arg2=arguments.list[i][1]
+        found=0
+        for j in range(len(options)):
+            if options[j][0]==arg1:
+                #this option already exists: overwrite its value
+                options[j][1]=arg2
+                found=1
+                break
+        if not found:
+            #this option did not exist, add it:
+            options.append([arg1,arg2])
+    return options
Index: ../trunk-jpl/src/py3/solvers/asmoptions.py
===================================================================
--- ../trunk-jpl/src/py3/solvers/asmoptions.py (revision 0)
+++ ../trunk-jpl/src/py3/solvers/asmoptions.py (revision 19895)
@@ -0,0 +1,29 @@
+import pairoptions
+
+def asmoptions(**kwargs):
+    #ASMOPTIONS - return ASM petsc options
+    #
+    # Usage:
+    #    options=asmoptions;
+
+    #retrieve options provided in varargin
+    arguments=pairoptions.pairoptions(**kwargs)
+
+    options=[['toolkit','petsc'],['mat_type','aij'],['ksp_type','gmres'],['pc_type','asm'],['sub_pc_type','lu'],['pc_asm_overlap',3],['ksp_max_it',100],['ksp_rtol',1e-30]]
+
+    #now, go through our arguments, and write over default options.
+    for i in range(len(arguments.list)):
+        arg1=arguments.list[i][0]
+        arg2=arguments.list[i][1]
+        found=0
+        for j in range(len(options)):
+            if options[j][0]==arg1:
+                #this option already exists: overwrite its value
+                options[j][1]=arg2
+                found=1
+                break
+        if not found:
+            #this option did not exist, add it:
+            options.append([arg1,arg2])
+    return options
Index: ../trunk-jpl/src/py3/solvers/issmgslsolver.py
===================================================================
--- ../trunk-jpl/src/py3/solvers/issmgslsolver.py (revision 0)
+++ ../trunk-jpl/src/py3/solvers/issmgslsolver.py (revision 19895)
@@ -0,0 +1,29 @@
+import pairoptions
+
+def issmgslsolver(**kwargs):
+    #ISSMGSLSOLVER - return ISSM GSL solver options
+    #
+    # Usage:
+    #    options=issmgslsolver;
+
+    #retrieve options provided in varargin
+    arguments=pairoptions.pairoptions(**kwargs)
+
+    options=[['toolkit','issm'],['mat_type','dense'],['vec_type','seq'],['solver_type','gsl']]
+
+    #now, go through our arguments, and write over default options.
+    for i in range(len(arguments.list)):
+        arg1=arguments.list[i][0]
+        arg2=arguments.list[i][1]
+        found=0
+        for j in range(len(options)):
+            if options[j][0]==arg1:
+                #this option already exists: overwrite its value
+                options[j][1]=arg2
+                found=1
+                break
+        if not found:
+            #this option did not exist, add it:
+            options.append([arg1,arg2])
+    return options
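How the override loop behaves (a sketch, assuming pairoptions stores each keyword as a [name,value] pair on its list attribute, as the loops above expect): matching defaults are replaced in place, unknown names are appended:

    from asmoptions import asmoptions

    options=asmoptions(ksp_max_it=200,my_flag='on')
    # [['toolkit','petsc'],...,['ksp_max_it',200],['ksp_rtol',1e-30],['my_flag','on']]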
Index: ../trunk-jpl/src/py3/solvers/matlaboptions.py
===================================================================
--- ../trunk-jpl/src/py3/solvers/matlaboptions.py (revision 0)
+++ ../trunk-jpl/src/py3/solvers/matlaboptions.py (revision 19895)
@@ -0,0 +1,29 @@
+import pairoptions
+
+def matlaboptions(**kwargs):
+    #MATLABOPTIONS - return Matlab petsc options
+    #
+    # Usage:
+    #    options=matlaboptions;
+
+    #retrieve options provided in varargin
+    arguments=pairoptions.pairoptions(**kwargs)
+
+    options=[['toolkit','petsc'],['ksp_type','matlab']]
+
+    #now, go through our arguments, and write over default options.
+    for i in range(len(arguments.list)):
+        arg1=arguments.list[i][0]
+        arg2=arguments.list[i][1]
+        found=0
+        for j in range(len(options)):
+            if options[j][0]==arg1:
+                #this option already exists: overwrite its value
+                options[j][1]=arg2
+                found=1
+                break
+        if not found:
+            #this option did not exist, add it:
+            options.append([arg1,arg2])
+    return options
Index: ../trunk-jpl/src/py3/solvers/jacobicgoptions.py
===================================================================
--- ../trunk-jpl/src/py3/solvers/jacobicgoptions.py (revision 0)
+++ ../trunk-jpl/src/py3/solvers/jacobicgoptions.py (revision 19895)
@@ -0,0 +1,29 @@
+import pairoptions
+
+def jacobicgoptions(**kwargs):
+    #JACOBICGOPTIONS - return Jacobi preconditioned conjugate gradient petsc options
+    #
+    # Usage:
+    #    options=jacobicgoptions;
+
+    #retrieve options provided in varargin
+    arguments=pairoptions.pairoptions(**kwargs)
+
+    options=[['toolkit','petsc'],['mat_type','aij'],['ksp_type','cg'],['ksp_max_it',100],['ksp_rtol',1e-15]]
+
+    #now, go through our arguments, and write over default options.
+    for i in range(len(arguments.list)):
+        arg1=arguments.list[i][0]
+        arg2=arguments.list[i][1]
+        found=0
+        for j in range(len(options)):
+            if options[j][0]==arg1:
+                #this option already exists: overwrite its value
+                options[j][1]=arg2
+                found=1
+                break
+        if not found:
+            #this option did not exist, add it:
+            options.append([arg1,arg2])
+    return options
Index: ../trunk-jpl/src/py3/solvers/mumpsoptions.py
===================================================================
--- ../trunk-jpl/src/py3/solvers/mumpsoptions.py (revision 0)
+++ ../trunk-jpl/src/py3/solvers/mumpsoptions.py (revision 19895)
@@ -0,0 +1,36 @@
+from collections import OrderedDict
+import pairoptions
+from IssmConfig import IssmConfig
+
+def mumpsoptions(**kwargs):
+    """
+    MUMPSOPTIONS - return MUMPS direct solver petsc options
+
+    Usage:
+        options=mumpsoptions;
+    """
+
+    #retrieve options provided in varargin
+    options=pairoptions.pairoptions(**kwargs)
+    mumps=OrderedDict()
+
+    #default mumps options
+    PETSC_VERSION=IssmConfig('_PETSC_MAJOR_')[0]
+    if PETSC_VERSION==2.:
+        mumps['toolkit']='petsc'
+        mumps['mat_type']=options.getfieldvalue('mat_type','aijmumps')
+        mumps['ksp_type']=options.getfieldvalue('ksp_type','preonly')
+        mumps['pc_type']=options.getfieldvalue('pc_type','lu')
+        mumps['mat_mumps_icntl_14']=options.getfieldvalue('mat_mumps_icntl_14',120)
+        mumps['pc_factor_shift_positive_definite']=options.getfieldvalue('pc_factor_shift_positive_definite','true')
+    if PETSC_VERSION==3.:
+        mumps['toolkit']='petsc'
+        mumps['mat_type']=options.getfieldvalue('mat_type','mpiaij')
+        mumps['ksp_type']=options.getfieldvalue('ksp_type','preonly')
+        mumps['pc_type']=options.getfieldvalue('pc_type','lu')
+        mumps['pc_factor_mat_solver_package']=options.getfieldvalue('pc_factor_mat_solver_package','mumps')
+        mumps['mat_mumps_icntl_14']=options.getfieldvalue('mat_mumps_icntl_14',120)
+        mumps['pc_factor_shift_positive_definite']=options.getfieldvalue('pc_factor_shift_positive_definite','true')
+
+    return mumps
+
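A sketch of the PETSc-3 path, assuming IssmConfig reports a PETSc 3 build on this machine:

    from mumpsoptions import mumpsoptions

    mumps=mumpsoptions(mat_mumps_icntl_14=200)
    # OrderedDict with ksp_type 'preonly', pc_type 'lu',
    # pc_factor_mat_solver_package 'mumps' and the icntl override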
+	options=[['toolkit','petsc'],['mat_type','aij'],['ksp_type','cg'],['pc_type','sor'],['pc_sor_omega',1.1],['pc_sor_its',2]] + +	#now, go through our arguments, and write over default options. +	for i in range(len(arguments.list)): +		arg1=arguments.list[i][0] +		arg2=arguments.list[i][1] +		found=0 +		for j in range(len(options)): +			if options[j][0]==arg1: +				options[j][1]=arg2 +				found=1 +				break + if not found: +			#this option did not exist, add it: +			options.append([arg1,arg2]) +	return options Index: ../trunk-jpl/src/py3/solvers/jacobiasmoptions.py =================================================================== --- ../trunk-jpl/src/py3/solvers/jacobiasmoptions.py	(revision 0) +++ ../trunk-jpl/src/py3/solvers/jacobiasmoptions.py	(revision 19895) @@ -0,0 +1,29 @@ +import pairoptions + +def jacobiasmoptions(**kwargs): +	#JACOBIASMOPTIONS - return Additive Schwarz Method with Jacobi preconditioner petsc options +	# +	# Usage: +	# options=jacobiasmoptions() + +	#retrieve options provided as keyword arguments +	arguments=pairoptions.pairoptions(**kwargs) + +	options=[['toolkit','petsc'],['mat_type','aij'],['ksp_type','gmres'],['pc_type','asm'],['sub_pc_type','jacobi'],['pc_asm_overlap',3],['ksp_max_it',100],['ksp_rtol',1e-15]] + +	#now, go through our arguments, and write over default options. +	for i in range(len(arguments.list)): +		arg1=arguments.list[i][0] +		arg2=arguments.list[i][1] +		found=0 +		for j in range(len(options)): +			if options[j][0]==arg1: +				options[j][1]=arg2 +				found=1 +				break + if not found: +			#this option did not exist, add it: +			options.append([arg1,arg2]) +	return options Index: ../trunk-jpl/src/py3/solvers/stokesoptions.py =================================================================== --- ../trunk-jpl/src/py3/solvers/stokesoptions.py	(revision 0) +++ ../trunk-jpl/src/py3/solvers/stokesoptions.py	(revision 19895) @@ -0,0 +1,39 @@ +import pairoptions +from IssmConfig import IssmConfig + +def stokesoptions(**kwargs): +	#STOKESOPTIONS - return STOKES multi-physics solver petsc options +	# +	# Usage: +	# options=stokesoptions() + +	#retrieve options provided as keyword arguments +	arguments=pairoptions.pairoptions(**kwargs) + + +	#default stokes options +	PETSC_VERSION=IssmConfig('_PETSC_MAJOR_')[0] + +	if PETSC_VERSION==2.: +		raise RuntimeError('stokesoptions error message: multi-physics options not supported in Petsc 2') +	if PETSC_VERSION==3.: +		options=[['toolkit','petsc'],['mat_type','mpiaij'],['ksp_max_it',1000],['ksp_type','gmres'],['pc_type','fieldsplit'],['pc_field_split_type','schur'],\ +					['fieldsplit_0_pc_type','hypre'],['fieldsplit_0_ksp_type','gmres'],['fieldsplit_0_pc_hypre_type','boomeramg'],\ +					['fieldsplit_1_pc_type','jacobi'],['fieldsplit_1_ksp_type','preonly'],['issm_option_solver','stokes']] + +	#now, go through our arguments, and write over default options.
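+	#For instance (hypothetical value), stokesoptions(ksp_max_it=500) would replace the default ['ksp_max_it',1000] pair in the loop below; the fieldsplit_* defaults above request a Schur complement split, with hypre/boomeramg on the first block (typically velocity) and Jacobi on the second (typically pressure).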
+	for i in range(len(arguments.list)): +		arg1=arguments.list[i][0] +		arg2=arguments.list[i][1] +		found=0 +		for j in range(len(options)): +			if options[j][0]==arg1: +				options[j][1]=arg2 +				found=1 +				break + if not found: +			#this option did not exist, add it: +			options.append([arg1,arg2]) +	return options Index: ../trunk-jpl/src/py3/solvers/iluasmoptions.py =================================================================== --- ../trunk-jpl/src/py3/solvers/iluasmoptions.py	(revision 0) +++ ../trunk-jpl/src/py3/solvers/iluasmoptions.py	(revision 19895) @@ -0,0 +1,27 @@ +from collections import OrderedDict +import pairoptions + +def iluasmoptions(**kwargs): +	""" +	ILUASMOPTIONS - return ILU Additive Schwarz Method petsc options + +	Usage: +	options=iluasmoptions() +	""" + +	#retrieve options provided as keyword arguments +	options=pairoptions.pairoptions(**kwargs) +	iluasm=OrderedDict() + +	#default iluasm options +	iluasm['toolkit']='petsc' +	iluasm['mat_type']=options.getfieldvalue('mat_type','aij') +	iluasm['ksp_type']=options.getfieldvalue('ksp_type','gmres') +	iluasm['pc_type']=options.getfieldvalue('pc_type','asm') +	iluasm['sub_pc_type']=options.getfieldvalue('sub_pc_type','ilu') +	iluasm['pc_asm_overlap']=options.getfieldvalue('pc_asm_overlap',5) +	iluasm['ksp_max_it']=options.getfieldvalue('ksp_max_it',100) +	iluasm['ksp_rtol']=options.getfieldvalue('ksp_rtol',1e-15) + +	return iluasm + Index: ../trunk-jpl/src/py3/classes/massfluxatgate.py =================================================================== --- ../trunk-jpl/src/py3/classes/massfluxatgate.py	(revision 0) +++ ../trunk-jpl/src/py3/classes/massfluxatgate.py	(revision 19895) @@ -0,0 +1,71 @@ +from fielddisplay import fielddisplay +from EnumDefinitions import * +from pairoptions import pairoptions +from checkfield import checkfield +from WriteData import WriteData +from MeshProfileIntersection import MeshProfileIntersection +import os + +class massfluxatgate(object): +	""" +	MASSFLUXATGATE class definition + +	Usage: +	massfluxatgate=massfluxatgate(name='GateName',profilename='PathToExpFile') +	""" + +	def __init__(self,**kwargs): # {{{ + +		self.name = '' +		self.definitionenum = 0 +		self.profilename = '' +		self.segments = float('NaN') + +		#set defaults +		self.setdefaultparameters() + +		#use provided options to change fields +		options=pairoptions(**kwargs) + +		#OK get other fields +		self=options.AssignObjectFields(self) + +	#}}} + def __repr__(self): # {{{ + +		string=" Massfluxatgate:" + string="%s\n%s"%(string,fielddisplay(self,'name','identifier for this massfluxatgate response')) + string="%s\n%s"%(string,fielddisplay(self,'definitionenum','enum that identifies this output definition uniquely, from Outputdefinition[1-10]Enum')) + string="%s\n%s"%(string,fielddisplay(self,'profilename','name of file (shapefile or argus file) defining a profile (or gate)')) +		return string +	#}}} + def setdefaultparameters(self): # {{{ +		return self +	#}}} + def checkconsistency(self,md,solution,analyses): # {{{ + +		if not isinstance(self.name, str): +			raise RuntimeError("massfluxatgate error message: 'name' field should be a string!") + +		if not isinstance(self.profilename, str): +			raise RuntimeError("massfluxatgate error message: 'profilename' field should be a string!") + +		md = checkfield(md,'field',self.definitionenum,'values',[Outputdefinition1Enum(),Outputdefinition2Enum(),Outputdefinition3Enum(),Outputdefinition4Enum(),Outputdefinition5Enum(),Outputdefinition6Enum(),Outputdefinition7Enum(),Outputdefinition8Enum(),Outputdefinition9Enum(),Outputdefinition10Enum()]) +
#check the profilename points to a file!: +		if not os.path.isfile(self.profilename): +			raise RuntimeError("massfluxatgate error message: file name for profile corresponding to gate does not point to a legitimate file on disk!") + +		return md +	# }}} + def marshall(self,md,fid): # {{{ + +		#before marshalling, we need to create the segments out of the profilename: +		self.segments=MeshProfileIntersection(md.mesh.elements,md.mesh.x,md.mesh.y,self.profilename)[0] + +		#ok, marshall name and segments: +		WriteData(fid,'object',self,'fieldname','name','format','String') +		WriteData(fid,'object',self,'fieldname','definitionenum','format','Integer') +		WriteData(fid,'object',self,'fieldname','segments','format','DoubleMat','mattype',1) + +	# }}} Property changes on: ../trunk-jpl/src/py3/classes/massfluxatgate.py ___________________________________________________________________ Added: svn:executable +   * Index: ../trunk-jpl/src/py3/classes/SMBforcing.py =================================================================== --- ../trunk-jpl/src/py3/classes/SMBforcing.py	(revision 0) +++ ../trunk-jpl/src/py3/classes/SMBforcing.py	(revision 19895) @@ -0,0 +1,68 @@ +import numpy +from fielddisplay import fielddisplay +from EnumDefinitions import * +from checkfield import checkfield +from WriteData import WriteData +from project3d import project3d + +class SMBforcing(object): +	""" +	SMBforcing class definition + +	Usage: +	SMB=SMBforcing(); +	""" + +	def __init__(self): # {{{ +		self.mass_balance = float('NaN') +		self.requested_outputs = [] +	#}}} + def __repr__(self): # {{{ +		string=" surface forcings parameters:" + string="%s\n%s"%(string,fielddisplay(self,'mass_balance','surface mass balance [m/yr ice eq]')) + string="%s\n%s"%(string,fielddisplay(self,'requested_outputs','additional outputs requested')) +		return string +	#}}} + def extrude(self,md): # {{{ + +		self.mass_balance=project3d(md,'vector',self.mass_balance,'type','node'); +		return self +	#}}} + def defaultoutputs(self,md): # {{{ +		return [] +	#}}} + def initialize(self,md): # {{{ + +		if numpy.all(numpy.isnan(self.mass_balance)): +			self.mass_balance=numpy.zeros((md.mesh.numberofvertices,1)) +			print("      no SMBforcing.mass_balance specified: values set as zero") + +		return self +	#}}} + def checkconsistency(self,md,solution,analyses): # {{{ + +		if MasstransportAnalysisEnum() in analyses: +			md = checkfield(md,'fieldname','smb.mass_balance','timeseries',1,'NaN',1) + +		if BalancethicknessAnalysisEnum() in analyses: +			md = checkfield(md,'fieldname','smb.mass_balance','size',[md.mesh.numberofvertices],'NaN',1) + +		md = checkfield(md,'fieldname','smb.requested_outputs','stringrow',1) +		return md +	# }}} + def marshall(self,md,fid): # {{{ + +		yts=365.0*24.0*3600.0 + +		WriteData(fid,'enum',SmbEnum(),'data',SMBforcingEnum(),'format','Integer'); +		WriteData(fid,'object',self,'class','smb','fieldname','mass_balance','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1) + +		#process requested outputs +		outputs = self.requested_outputs +		indices = [i for i, x in enumerate(outputs) if x == 'default'] +		if len(indices) > 0: +			#'default' is a placeholder: splice the default output list in at its position +			outputscopy=outputs[0:indices[0]]+self.defaultoutputs(md)+outputs[indices[0]+1:] +			outputs    =outputscopy +		WriteData(fid,'data',outputs,'enum',SmbRequestedOutputsEnum(),'format','StringArray') + +	# }}} Index: ../trunk-jpl/src/py3/classes/calvinglevermann.py =================================================================== --- ../trunk-jpl/src/py3/classes/calvinglevermann.py	(revision
0) +++ ../trunk-jpl/src/py3/classes/calvinglevermann.py	(revision 19895) @@ -0,0 +1,68 @@ +from fielddisplay import fielddisplay +from EnumDefinitions import * +from StringToEnum import StringToEnum +from checkfield import checkfield +from WriteData import WriteData +from project3d import project3d + +class calvinglevermann(object): +	""" +	CALVINGLEVERMANN class definition + +	Usage: +	calvinglevermann=calvinglevermann(); +	""" + +	def __init__(self): # {{{ + +		self.stabilization = 0 +		self.spclevelset = float('NaN') +		self.coeff = float('NaN') +		self.meltingrate = float('NaN') + +		#set defaults +		self.setdefaultparameters() + +	#}}} + def __repr__(self): # {{{ +		string='   Calving Levermann parameters:' + string="%s\n%s"%(string,fielddisplay(self,'spclevelset','levelset constraints (NaN means no constraint)')) + string="%s\n%s"%(string,fielddisplay(self,'stabilization','0: no, 1: artificial_diffusivity, 2: streamline upwinding')) + string="%s\n%s"%(string,fielddisplay(self,'coeff','proportionality coefficient in Levermann model')) + string="%s\n%s"%(string,fielddisplay(self,'meltingrate','melting rate at given location [m/a]')) + +		return string +	#}}} + def extrude(self,md): # {{{ +		self.spclevelset=project3d(md,'vector',self.spclevelset,'type','node') +		self.coeff=project3d(md,'vector',self.coeff,'type','node') +		self.meltingrate=project3d(md,'vector',self.meltingrate,'type','node') +		return self +	#}}} + def setdefaultparameters(self): # {{{ + +		#stabilization = 2 by default +		self.stabilization = 2 + +		#Proportionality coefficient in Levermann model +		self.coeff=2e13 +	#}}} + def checkconsistency(self,md,solution,analyses):    # {{{ + +		#Early return +		if (solution!=TransientSolutionEnum()) or (not md.transient.iscalving): +			return md + +		md = checkfield(md,'fieldname','calving.spclevelset','timeseries',1) +		md = checkfield(md,'fieldname','calving.stabilization','values',[0,1,2]) +		md = checkfield(md,'fieldname','calving.coeff','size',[md.mesh.numberofvertices],'>',0) +		md = checkfield(md,'fieldname','calving.meltingrate','NaN',1,'size',[md.mesh.numberofvertices],'>=',0) +		return md +	# }}} + def marshall(self,md,fid):    # {{{ +		yts=365.*24.*3600.
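+		#yts is the number of seconds in a year; quantities given in m/a (the meltingrate below) are scaled by 1./yts into SI units when marshalled.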
+		WriteData(fid,'enum',CalvingLawEnum(),'data',CalvingLevermannEnum(),'format','Integer'); +		WriteData(fid,'enum',LevelsetStabilizationEnum(),'data',self.stabilization,'format','Integer'); +		WriteData(fid,'enum',SpcLevelsetEnum(),'data',self.spclevelset,'format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1); +		WriteData(fid,'enum',CalvinglevermannCoeffEnum(),'data',self.coeff,'format','DoubleMat','mattype',1) +		WriteData(fid,'object',self,'fieldname','meltingrate','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'scale',1./yts) +	# }}} Index: ../trunk-jpl/src/py3/classes/mask.py =================================================================== --- ../trunk-jpl/src/py3/classes/mask.py	(revision 0) +++ ../trunk-jpl/src/py3/classes/mask.py	(revision 19895) @@ -0,0 +1,63 @@ +import numpy +from fielddisplay import fielddisplay +from project3d import project3d +from EnumDefinitions import * +from checkfield import checkfield +from WriteData import WriteData +import MatlabFuncs as m + +class mask(object): +	""" +	MASK class definition + +	Usage: +	mask=mask(); +	""" + +	def __init__(self): # {{{ +		self.ice_levelset         = float('NaN') +		self.groundedice_levelset = float('NaN') + +		#set defaults +		self.setdefaultparameters() + +	#}}} + def __repr__(self): # {{{ +		string="   masks:" + + string="%s\n%s"%(string,fielddisplay(self,"groundedice_levelset","is ice grounded ? grounded ice if > 0, grounding line position if = 0, floating ice if < 0")) + string="%s\n%s"%(string,fielddisplay(self,"ice_levelset","presence of ice if < 0, icefront position if = 0, no ice if > 0")) +		return string +	#}}} + def extrude(self,md): # {{{ +		self.ice_levelset=project3d(md,'vector',self.ice_levelset,'type','node') +		self.groundedice_levelset=project3d(md,'vector',self.groundedice_levelset,'type','node') +		return self +	#}}} + def setdefaultparameters(self): # {{{ +		return self +	#}}} + def checkconsistency(self,md,solution,analyses):    # {{{ + +		md = checkfield(md,'fieldname','mask.ice_levelset'        ,'size',[md.mesh.numberofvertices]) +		isice=numpy.array(md.mask.ice_levelset<=0,int) +		if numpy.sum(isice)==0: +			raise TypeError("no ice present in the domain") + +		icefront=numpy.sum(md.mask.ice_levelset[md.mesh.elements-1]==0,axis=1) +		if (max(icefront)==3 and m.strcmp(md.mesh.elementtype(),'Tria')) or (max(icefront)==6 and m.strcmp(md.mesh.elementtype(),'Penta')): +			raise TypeError("At least one element has all nodes on ice front, change md.mask.ice_levelset to fix it") + +		return md +	# }}} + def marshall(self,md,fid): # {{{ +		WriteData(fid,'object',self,'fieldname','groundedice_levelset','format','DoubleMat','mattype',1) +		WriteData(fid,'object',self,'fieldname','ice_levelset','format','DoubleMat','mattype',1) + +		# get mask of vertices of elements with ice +		isice=numpy.array(md.mask.ice_levelset<0.,int) +		vlist = numpy.zeros((md.mesh.numberofvertices,1), dtype=int) +		pos=numpy.nonzero(numpy.sum(isice[md.mesh.elements-1],axis=1))[0] +		vlist[md.mesh.elements[pos,:]-1]=1 +		WriteData(fid,'data',vlist,'enum',IceMaskNodeActivationEnum(),'format','DoubleMat','mattype',1); +	# }}} Index: ../trunk-jpl/src/py3/classes/radaroverlay.py =================================================================== --- ../trunk-jpl/src/py3/classes/radaroverlay.py	(revision 0) +++ ../trunk-jpl/src/py3/classes/radaroverlay.py	(revision 19895) @@ -0,0 +1,29 @@ +from fielddisplay import fielddisplay + +class radaroverlay(object): +	""" +	RADAROVERLAY class definition + +	Usage: +
radaroverlay=radaroverlay(); + """ + + def __init__(self): # {{{ + self.pwr = float('NaN') + self.x = float('NaN') + self.y = float('NaN') + + #set defaults + self.setdefaultparameters() + + #}}} + def __repr__(self): # {{{ + string=' radaroverlay parameters:' + string="%s\n%s"%(string,fielddisplay(self,'pwr','radar power image (matrix)')) + string="%s\n%s"%(string,fielddisplay(self,'x','corresponding x coordinates [m]')) + string="%s\n%s"%(string,fielddisplay(self,'y','corresponding y coordinates [m]')) + return string + #}}} + def setdefaultparameters(self): # {{{ + return self + #}}} Index: ../trunk-jpl/src/py3/classes/m1qn3inversion.py =================================================================== --- ../trunk-jpl/src/py3/classes/m1qn3inversion.py (revision 0) +++ ../trunk-jpl/src/py3/classes/m1qn3inversion.py (revision 19895) @@ -0,0 +1,198 @@ +import numpy +from project3d import project3d +from fielddisplay import fielddisplay +from EnumDefinitions import * +from StringToEnum import StringToEnum +from checkfield import checkfield +from WriteData import WriteData +from supportedcontrols import supportedcontrols +from supportedcostfunctions import supportedcostfunctions +from marshallcostfunctions import marshallcostfunctions + +class m1qn3inversion(object): + ''' + M1QN3 class definition + + Usage: + m1qn3inversion=m1qn3inversion() + ''' + + def __init__(self,*args): # {{{ + + if not len(args): + print('empty init') + self.iscontrol = 0 + self.incomplete_adjoint = 0 + self.control_parameters = float('NaN') + self.control_scaling_factors = float('NaN') + self.maxsteps = 0 + self.maxiter = 0 + self.dxmin = 0. + self.gttol = 0. + self.cost_functions = float('NaN') + self.cost_functions_coefficients = float('NaN') + self.min_parameters = float('NaN') + self.max_parameters = float('NaN') + self.vx_obs = float('NaN') + self.vy_obs = float('NaN') + self.vz_obs = float('NaN') + self.vel_obs = float('NaN') + self.thickness_obs = float('NaN') + + #set defaults + self.setdefaultparameters() + elif len(args)==1 and args[0].__module__=='inversion': + print('converting inversion to m1qn3inversion') + inv=args[0] + #first call setdefaultparameters: + self.setdefaultparameters() + + #then go fish whatever is available in the inversion object provided to the constructor + self.iscontrol = inv.iscontrol + self.incomplete_adjoint = inv.incomplete_adjoint + self.control_parameters = inv.control_parameters + self.maxsteps = inv.nsteps + self.cost_functions = inv.cost_functions + self.cost_functions_coefficients = inv.cost_functions_coefficients + self.min_parameters = inv.min_parameters + self.max_parameters = inv.max_parameters + self.vx_obs = inv.vx_obs + self.vy_obs = inv.vy_obs + self.vz_obs = inv.vz_obs + self.vel_obs = inv.vel_obs + self.thickness_obs = inv.thickness_obs + else: + raise Exception('constructor not supported') + #}}} + def __repr__(self): # {{{ + string=' m1qn3inversion parameters:' + string="%s\n%s"%(string,fielddisplay(self,'iscontrol','is inversion activated?')) + string="%s\n%s"%(string,fielddisplay(self,'incomplete_adjoint','1: linear viscosity, 0: non-linear viscosity')) + string="%s\n%s"%(string,fielddisplay(self,'control_parameters','ex: [''FrictionCoefficient''], or [''MaterialsRheologyBbar'']')) + string="%s\n%s"%(string,fielddisplay(self,'control_scaling_factors','order of magnitude of each control (useful for multi-parameter optimization)')) + string="%s\n%s"%(string,fielddisplay(self,'maxsteps','maximum number of iterations (gradient computation)')) + 
string="%s\n%s"%(string,fielddisplay(self,'maxiter','maximum number of Function evaluation (forward run)')) + string="%s\n%s"%(string,fielddisplay(self,'dxmin','convergence criterion: two points less than dxmin from eachother (sup-norm) are considered identical')) + string="%s\n%s"%(string,fielddisplay(self,'gttol','||g(X)||/||g(X0)|| (g(X0): gradient at initial guess X0)')) + string="%s\n%s"%(string,fielddisplay(self,'cost_functions','indicate the type of response for each optimization step')) + string="%s\n%s"%(string,fielddisplay(self,'cost_functions_coefficients','cost_functions_coefficients applied to the misfit of each vertex and for each control_parameter')) + string="%s\n%s"%(string,fielddisplay(self,'min_parameters','absolute minimum acceptable value of the inversed parameter on each vertex')) + string="%s\n%s"%(string,fielddisplay(self,'max_parameters','absolute maximum acceptable value of the inversed parameter on each vertex')) + string="%s\n%s"%(string,fielddisplay(self,'vx_obs','observed velocity x component [m/yr]')) + string="%s\n%s"%(string,fielddisplay(self,'vy_obs','observed velocity y component [m/yr]')) + string="%s\n%s"%(string,fielddisplay(self,'vel_obs','observed velocity magnitude [m/yr]')) + string="%s\n%s"%(string,fielddisplay(self,'thickness_obs','observed thickness [m]')) + string="%s\n%s"%(string,'Available cost functions:') + string="%s\n%s"%(string,' 101: SurfaceAbsVelMisfit') + string="%s\n%s"%(string,' 102: SurfaceRelVelMisfit') + string="%s\n%s"%(string,' 103: SurfaceLogVelMisfit') + string="%s\n%s"%(string,' 104: SurfaceLogVxVyMisfit') + string="%s\n%s"%(string,' 105: SurfaceAverageVelMisfit') + string="%s\n%s"%(string,' 201: ThicknessAbsMisfit') + string="%s\n%s"%(string,' 501: DragCoefficientAbsGradient') + string="%s\n%s"%(string,' 502: RheologyBbarAbsGradient') + string="%s\n%s"%(string,' 503: ThicknessAbsGradient') + return string + #}}} + def extrude(self,md): # {{{ + self.vx_obs=project3d(md,'vector',self.vx_obs,'type','node') + self.vy_obs=project3d(md,'vector',self.vy_obs,'type','node') + self.vel_obs=project3d(md,'vector',self.vel_obs,'type','node') + self.thickness_obs=project3d(md,'vector',self.thickness_obs,'type','node') + if not numpy.any(numpy.isnan(self.cost_functions_coefficients)): + self.cost_functions_coefficients=project3d(md,'vector',self.cost_functions_coefficients,'type','node') + if not numpy.any(numpy.isnan(self.min_parameters)): + self.min_parameters=project3d(md,'vector',self.min_parameters,'type','node') + if not numpy.any(numpy.isnan(self.max_parameters)): + self.max_parameters=project3d(md,'vector',self.max_parameters,'type','node') + return self + #}}} + def setdefaultparameters(self): # {{{ + + #default is incomplete adjoint for now + self.incomplete_adjoint=1 + + #parameter to be inferred by control methods (only + #drag and B are supported yet) + self.control_parameters='FrictionCoefficient' + + #Scaling factor for each control + self.control_scaling_factors=1 + + #number of iterations + self.maxsteps=20 + self.maxiter=40 + + #several responses can be used: + self.cost_functions=101 + + #m1qn3 parameters + self.dxmin = 0.1 + self.gttol = 1e-4 + + return self + #}}} + def checkconsistency(self,md,solution,analyses): # {{{ + + #Early return + if not self.iscontrol: + return md + + num_controls=numpy.size(md.inversion.control_parameters) + num_costfunc=numpy.size(md.inversion.cost_functions) + + md = checkfield(md,'fieldname','inversion.iscontrol','values',[0,1]) + md = 
checkfield(md,'fieldname','inversion.incomplete_adjoint','values',[0,1]) + md = checkfield(md,'fieldname','inversion.control_parameters','cell',1,'values',supportedcontrols()) + md = checkfield(md,'fieldname','inversion.control_scaling_factors','size',[num_controls],'>',0,'NaN',1) + md = checkfield(md,'fieldname','inversion.maxsteps','numel',[1],'>=',0) + md = checkfield(md,'fieldname','inversion.maxiter','numel',[1],'>=',0) + md = checkfield(md,'fieldname','inversion.dxmin','numel',[1],'>',0.) + md = checkfield(md,'fieldname','inversion.gttol','numel',[1],'>',0.) + md = checkfield(md,'fieldname','inversion.cost_functions','size',[num_costfunc],'values',supportedcostfunctions()) + md = checkfield(md,'fieldname','inversion.cost_functions_coefficients','size',[md.mesh.numberofvertices,num_costfunc],'>=',0) + md = checkfield(md,'fieldname','inversion.min_parameters','size',[md.mesh.numberofvertices,num_controls]) + md = checkfield(md,'fieldname','inversion.max_parameters','size',[md.mesh.numberofvertices,num_controls]) + + if solution==BalancethicknessSolutionEnum(): + md = checkfield(md,'fieldname','inversion.thickness_obs','size',[md.mesh.numberofvertices],'NaN',1) + else: + md = checkfield(md,'fieldname','inversion.vx_obs','size',[md.mesh.numberofvertices],'NaN',1) + md = checkfield(md,'fieldname','inversion.vy_obs','size',[md.mesh.numberofvertices],'NaN',1) + + return md + # }}} + def marshall(self,md,fid): # {{{ + + yts=365.0*24.0*3600.0 + + WriteData(fid,'object',self,'class','inversion','fieldname','iscontrol','format','Boolean') + WriteData(fid,'enum',InversionTypeEnum(),'data',2,'format','Integer') + if not self.iscontrol: + return + WriteData(fid,'object',self,'class','inversion','fieldname','incomplete_adjoint','format','Boolean') + WriteData(fid,'object',self,'class','inversion','fieldname','control_scaling_factors','format','DoubleMat','mattype',3) + WriteData(fid,'object',self,'class','inversion','fieldname','maxsteps','format','Integer') + WriteData(fid,'object',self,'class','inversion','fieldname','maxiter','format','Integer') + WriteData(fid,'object',self,'class','inversion','fieldname','dxmin','format','Double') + WriteData(fid,'object',self,'class','inversion','fieldname','gttol','format','Double') + WriteData(fid,'object',self,'class','inversion','fieldname','cost_functions_coefficients','format','DoubleMat','mattype',1) + WriteData(fid,'object',self,'class','inversion','fieldname','min_parameters','format','DoubleMat','mattype',3) + WriteData(fid,'object',self,'class','inversion','fieldname','max_parameters','format','DoubleMat','mattype',3) + WriteData(fid,'object',self,'class','inversion','fieldname','vx_obs','format','DoubleMat','mattype',1,'scale',1./yts) + WriteData(fid,'object',self,'class','inversion','fieldname','vy_obs','format','DoubleMat','mattype',1,'scale',1./yts) + WriteData(fid,'object',self,'class','inversion','fieldname','vz_obs','format','DoubleMat','mattype',1,'scale',1./yts) + WriteData(fid,'object',self,'class','inversion','fieldname','thickness_obs','format','DoubleMat','mattype',1) + + #process control parameters + num_control_parameters=len(self.control_parameters) + data=numpy.array([StringToEnum(control_parameter)[0] for control_parameter in self.control_parameters]).reshape(1,-1) + WriteData(fid,'data',data,'enum',InversionControlParametersEnum(),'format','DoubleMat','mattype',3) + WriteData(fid,'data',num_control_parameters,'enum',InversionNumControlParametersEnum(),'format','Integer') + + #process cost functions + 
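#note: the integer codes listed in __repr__ above (101, 102, ..., 503) are converted by marshallcostfunctions and written below as a 1 x n DoubleMat, together with their count. +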
num_cost_functions=numpy.size(self.cost_functions) + data=marshallcostfunctions(self.cost_functions) + WriteData(fid,'data',numpy.array(data).reshape(1,-1),'enum',InversionCostFunctionsEnum(),'format','DoubleMat','mattype',3) + WriteData(fid,'data',num_cost_functions,'enum',InversionNumCostFunctionsEnum(),'format','Integer') + # }}} Index: ../trunk-jpl/src/py3/classes/thermal.py =================================================================== --- ../trunk-jpl/src/py3/classes/thermal.py (revision 0) +++ ../trunk-jpl/src/py3/classes/thermal.py (revision 19895) @@ -0,0 +1,130 @@ +import numpy +from project3d import project3d +from fielddisplay import fielddisplay +from EnumDefinitions import * +from checkfield import checkfield +from WriteData import WriteData +import MatlabFuncs as m + +class thermal(object): + """ + THERMAL class definition + + Usage: + thermal=thermal(); + """ + + def __init__(self): # {{{ + self.spctemperature = float('NaN') + self.penalty_threshold = 0 + self.stabilization = 0 + self.reltol = 0 + self.maxiter = 0 + self.penalty_lock = 0 + self.penalty_factor = 0 + self.isenthalpy = 0 + self.isdynamicbasalspc = 0; + self.requested_outputs = [] + + #set defaults + self.setdefaultparameters() + + #}}} + def __repr__(self): # {{{ + string=' Thermal solution parameters:' + string="%s\n%s"%(string,fielddisplay(self,'spctemperature','temperature constraints (NaN means no constraint) [K]')) + string="%s\n%s"%(string,fielddisplay(self,'stabilization','0: no, 1: artificial_diffusivity, 2: SUPG')) + string="%s\n%s"%(string,fielddisplay(self,'maxiter','maximum number of non linear iterations')) + string="%s\n%s"%(string,fielddisplay(self,'reltol','relative tolerance criterion')) + string="%s\n%s"%(string,fielddisplay(self,'penalty_lock','stabilize unstable thermal constraints that keep zigzagging after n iteration (default is 0, no stabilization)')) + string="%s\n%s"%(string,fielddisplay(self,'penalty_threshold','threshold to declare convergence of thermal solution (default is 0)')) + string="%s\n%s"%(string,fielddisplay(self,'isenthalpy','use an enthalpy formulation to include temperate ice (default is 0)')) + string="%s\n%s"%(string,fielddisplay(self,'isdynamicbasalspc','enable dynamic setting of basal forcing. 
required for enthalpy formulation (default is 0)')) + string="%s\n%s"%(string,fielddisplay(self,'requested_outputs','additional outputs requested')) + return string + #}}} + def extrude(self,md): # {{{ + self.spctemperature=project3d(md,'vector',self.spctemperature,'type','node','layer',md.mesh.numberoflayers,'padding',numpy.nan) + if isinstance(md.initialization.temperature,numpy.ndarray) and numpy.size(md.initialization.temperature,axis=0)==md.mesh.numberofvertices: + self.spctemperature=numpy.nan*numpy.ones((md.mesh.numberofvertices,1)) + pos=numpy.nonzero(md.mesh.vertexonsurface)[0] + self.spctemperature[pos]=md.initialization.temperature[pos] #impose observed temperature on surface + return self + #}}} + def defaultoutputs(self,md): # {{{ + + if self.isenthalpy: + return ['Enthalpy','Temperature','Waterfraction','Watercolumn','BasalforcingsGroundediceMeltingRate'] + else: + return ['Temperature','BasalforcingsGroundediceMeltingRate'] + + #}}} + def setdefaultparameters(self): # {{{ + + #Number of unstable constraints acceptable + self.penalty_threshold=0 + + #Type of stabilization used + self.stabilization=1 + + #Relative tolerance for the enthalpy convergence + self.reltol=0.01 + + #Maximum number of iterations + self.maxiter=100 + + #factor used to compute the values of the penalties: kappa=max(stiffness matrix)*10^penalty_factor + self.penalty_factor=3 + + #Should we use cold ice (default) or enthalpy formulation + self.isenthalpy=0 + + #will basal boundary conditions be set dynamically + self.isdynamicbasalspc=0; + + #default output + self.requested_outputs=['default'] + return self + + #}}} + def checkconsistency(self,md,solution,analyses): # {{{ + + #Early return + if (ThermalAnalysisEnum() not in analyses and EnthalpyAnalysisEnum() not in analyses) or (solution==TransientSolutionEnum() and not md.transient.isthermal): + return md + + md = checkfield(md,'fieldname','thermal.stabilization','numel',[1],'values',[0,1,2]) + md = checkfield(md,'fieldname','thermal.spctemperature','timeseries',1) + if EnthalpyAnalysisEnum() in analyses and md.thermal.isenthalpy and md.mesh.dimension()==3: + pos=numpy.nonzero(numpy.logical_not(numpy.isnan(md.thermal.spctemperature[0:md.mesh.numberofvertices]))) + replicate=numpy.tile(md.geometry.surface-md.mesh.z,(1,numpy.size(md.thermal.spctemperature,axis=1))) + md = checkfield(md,'fieldname','thermal.spctemperature[numpy.nonzero(numpy.logical_not(numpy.isnan(md.thermal.spctemperature[0:md.mesh.numberofvertices,:])))]','<',md.materials.meltingpoint-md.materials.beta*md.materials.rho_ice*md.constants.g*replicate[pos],'message',"spctemperature should be below the adjusted melting point") + md = checkfield(md,'fieldname','thermal.isenthalpy','numel',[1],'values',[0,1]) + md = checkfield(md,'fieldname','thermal.isdynamicbasalspc','numel',[1],'values',[0,1]); + if(md.thermal.isenthalpy): + if numpy.isnan(md.stressbalance.reltol): + md.checkmessage("for a steadystate computation, thermal.reltol (relative convergence criterion) must be defined!") + md = checkfield(md,'fieldname','thermal.reltol','>',0.,'message',"reltol must be larger than zero"); + md = checkfield(md,'fieldname','thermal.requested_outputs','stringrow',1) + + return md + # }}} + def marshall(self,md,fid): # {{{ + WriteData(fid,'object',self,'fieldname','spctemperature','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1) + WriteData(fid,'object',self,'fieldname','penalty_threshold','format','Integer') + 
WriteData(fid,'object',self,'fieldname','stabilization','format','Integer') +		WriteData(fid,'object',self,'fieldname','reltol','format','Double'); +		WriteData(fid,'object',self,'fieldname','maxiter','format','Integer') +		WriteData(fid,'object',self,'fieldname','penalty_lock','format','Integer') +		WriteData(fid,'object',self,'fieldname','penalty_factor','format','Double') +		WriteData(fid,'object',self,'fieldname','isenthalpy','format','Boolean') +		WriteData(fid,'object',self,'fieldname','isdynamicbasalspc','format','Boolean'); + +		#process requested outputs +		outputs = self.requested_outputs +		indices = [i for i, x in enumerate(outputs) if x == 'default'] +		if len(indices) > 0: +			#'default' is a placeholder: splice the default output list in at its position +			outputscopy=outputs[0:indices[0]]+self.defaultoutputs(md)+outputs[indices[0]+1:] +			outputs    =outputscopy +		WriteData(fid,'data',outputs,'enum',ThermalRequestedOutputsEnum(),'format','StringArray') +	# }}} Index: ../trunk-jpl/src/py3/classes/frictioncoulomb.py =================================================================== --- ../trunk-jpl/src/py3/classes/frictioncoulomb.py	(revision 0) +++ ../trunk-jpl/src/py3/classes/frictioncoulomb.py	(revision 19895) @@ -0,0 +1,63 @@ +from fielddisplay import fielddisplay +from project3d import project3d +from EnumDefinitions import * +from checkfield import checkfield +from WriteData import WriteData + +class frictioncoulomb(object): +	""" +	FRICTIONCOULOMB class definition + +	Usage: +	frictioncoulomb=frictioncoulomb() +	""" + +	def __init__(self): # {{{ +		self.coefficient = float('NaN') +		self.coefficientcoulomb = float('NaN') +		self.p = float('NaN') +		self.q = float('NaN') + +		#set defaults +		self.setdefaultparameters() + +	#}}} + def __repr__(self): # {{{ +		string="Basal shear stress parameters: Sigma_b = min(coefficient^2 * Neff ^r * |u_b|^(s-1) * u_b,\n coefficientcoulomb^2 * rho_i * g * (h-h_f)), (effective stress Neff=rho_ice*g*thickness+rho_water*g*bed, r=q/p and s=1/p)."
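+		#in other words, the law displayed above is Sigma_b = min( coefficient^2 * Neff^r * |u_b|^(s-1) * u_b , coefficientcoulomb^2 * rho_i * g * (h-h_f) ) with r=q/p and s=1/p; this comment restates the string above, it adds no extra physics.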
+ + string="%s\n%s"%(string,fielddisplay(self,"coefficient","power law (Weertman) friction coefficient [SI]")) + string="%s\n%s"%(string,fielddisplay(self,"coefficientcoulomb","Coulomb friction coefficient [SI]")) + string="%s\n%s"%(string,fielddisplay(self,"p","p exponent")) + string="%s\n%s"%(string,fielddisplay(self,"q","q exponent")) + return string + #}}} + def extrude(self,md): # {{{ + self.coefficient=project3d(md,'vector',self.coefficient,'type','node','layer',1) + self.coefficientcoulomb=project3d(md,'vector',self.coefficientcoulomb,'type','node','layer',1) + self.p=project3d(md,'vector',self.p,'type','element') + self.q=project3d(md,'vector',self.q,'type','element') + return self + #}}} + def setdefaultparameters(self): # {{{ + return self + #}}} + def checkconsistency(self,md,solution,analyses): # {{{ + + #Early return + if StressbalanceAnalysisEnum() not in analyses and ThermalAnalysisEnum() not in analyses: + return md + + md = checkfield(md,'fieldname','friction.coefficient','timeseries',1,'NaN',1) + md = checkfield(md,'fieldname','friction.coefficientcoulomb','timeseries',1,'NaN',1) + md = checkfield(md,'fieldname','friction.q','NaN',1,'size',[md.mesh.numberofelements]) + md = checkfield(md,'fieldname','friction.p','NaN',1,'size',[md.mesh.numberofelements]) + + return md + # }}} + def marshall(self,md,fid): # {{{ + WriteData(fid,'enum',FrictionLawEnum(),'data',1,'format','Integer') + WriteData(fid,'object',self,'fieldname','coefficient','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'enum',FrictionCoefficientEnum()) + WriteData(fid,'object',self,'fieldname','coefficientcoulomb','format','DoubleMat','mattype',1,'enum',FrictionCoefficientcoulombEnum()) + WriteData(fid,'object',self,'fieldname','p','format','DoubleMat','mattype',2,'enum',FrictionPEnum()) + WriteData(fid,'object',self,'fieldname','q','format','DoubleMat','mattype',2,'enum',FrictionQEnum()) + # }}} Index: ../trunk-jpl/src/py3/classes/gia.py =================================================================== --- ../trunk-jpl/src/py3/classes/gia.py (revision 0) +++ ../trunk-jpl/src/py3/classes/gia.py (revision 19895) @@ -0,0 +1,64 @@ +from fielddisplay import fielddisplay +from project3d import project3d +from EnumDefinitions import * +from checkfield import checkfield +from WriteData import WriteData + +class gia(object): + """ + GIA class definition + + Usage: + gia=gia(); + """ + + def __init__(self): # {{{ + self.mantle_viscosity = float('NaN'); + self.lithosphere_thickness = float('NaN'); + self.cross_section_shape = 0; + + #set defaults + self.setdefaultparameters() + + #}}} + def __repr__(self): # {{{ + + string=' gia solution parameters:' + + string="%s\n%s"%(string,fielddisplay(self,'mantle_viscosity','mantle viscosity constraints (NaN means no constraint) (Pa s)')) + string="%s\n%s"%(string,fielddisplay(self,'lithosphere_thickness','lithosphere thickness constraints (NaN means no constraint) (m)')) + string="%s\n%s"%(string,fielddisplay(self,'cross_section_shape',"1: square-edged, 2: elliptical-edged surface")) + return string + #}}} + def extrude(self,md): # {{{ + self.mantle_viscosity=project3d(md,'vector',self.mantle_viscosity,'type','node') + self.lithosphere_thickness=project3d(md,'vector',self.lithosphere_thickness,'type','node') + return self + #}}} + def setdefaultparameters(self): # {{{ + + self.cross_section_shape=1; + + return self + #}}} + def checkconsistency(self,md,solution,analyses): # {{{ + + # Early return + if (GiaAnalysisEnum() not in analyses): + return 
md + +		md = checkfield(md,'fieldname','gia.mantle_viscosity','NaN',1,'size',[md.mesh.numberofvertices,1],'>',0) +		md = checkfield(md,'fieldname','gia.lithosphere_thickness','NaN',1,'size',[md.mesh.numberofvertices,1],'>',0) +		md = checkfield(md,'fieldname','gia.cross_section_shape','numel',[1],'values',[1,2]) + +		#be sure that if we are running a masstransport ice flow model coupled with gia, that thickness forcings +		#are not provided into the future. + +		return md +	# }}} + def marshall(self,md,fid):    # {{{ + +		WriteData(fid,'object',self,'fieldname','mantle_viscosity','format','DoubleMat','mattype',1); +		WriteData(fid,'object',self,'fieldname','lithosphere_thickness','format','DoubleMat','mattype',1,'scale',10.**3.); +		WriteData(fid,'object',self,'fieldname','cross_section_shape','format','Integer'); +	# }}} Index: ../trunk-jpl/src/py3/classes/model.py =================================================================== --- ../trunk-jpl/src/py3/classes/model.py	(revision 0) +++ ../trunk-jpl/src/py3/classes/model.py	(revision 19895) @@ -0,0 +1,670 @@ +#module imports {{{ +import numpy +import copy +import sys +from os import path +from netCDF4 import Dataset +import MatlabFuncs as m +#helper modules used by extract/extrude/collapse below; they could instead be +#imported inside the methods if circular imports ever become a concern +from results import results +from FlagElements import FlagElements +from NodeConnectivity import NodeConnectivity +from ElementConnectivity import ElementConnectivity +from contourenvelope import contourenvelope +from mesh3dprisms import mesh3dprisms +from project3d import project3d +from project2d import project2d +from DepthAverage import DepthAverage +#}}} + +class model(object): +	#properties + def __init__(self,*filename):#{{{ + +		def netCDFread(filename): +			def walktree(data): +				keys = data.groups.keys() +				yield keys + for key in keys: + for children in walktree(data.groups[str(key)]): +						yield children + +			if path.exists(filename): +				print('Opening {} for reading'.format(filename)) +				NCData=Dataset(filename, 'r') +				class_dict={} + +				for children in walktree(NCData): + for child in children: +						class_dict[str(child)]=str(getattr(NCData.groups[str(child)],'classtype')) + +				return class_dict + +		if filename: +			classtype=netCDFread(filename[0]) +		else: +			classtype=self.properties() + +		VT=[v[0] for v in dict.values(classtype)] +		classnames=[classname for classname in dict.keys(classtype)] +		module=map(__import__,VT) + +		for i,mod in enumerate(module): +			self.__dict__[classnames[i]] = getattr(mod,str(classtype[str(classnames[i])][0]))() + +	#}}} + +	def properties(self):    # {{{ +		# ordered list of properties since vars(self) is random +		return {'mesh':['mesh2d','mesh properties'],\ +		        'mask':['mask','defines grounded and floating elements'],\ +		        'geometry':['geometry','surface elevation, bedrock topography, ice thickness,...'],\ +		        'constants':['constants','physical constants'],\ +		        'smb':['SMBpdd','surface forcings'],\ +		        'basalforcings':['basalforcings','bed forcings'],\ +		        'materials':['matice','material properties'],\ +		        'damage':['damage','damage propagation laws'],\ +		        'friction':['friction','basal friction/drag properties'],\ +		        'flowequation':['flowequation','flow equations'],\ +		        'timestepping':['timestepping','time stepping for transient models'],\ +		        'initialization':['initialization','initial guess/state'],\ +		        'rifts':['rifts','rifts properties'],\ +		        'debug':['debug','debugging tools (valgrind, gprof)'],\ +		        'verbose':['verbose','verbosity level in solve'],\ +		        'settings':['settings','settings properties'],\ +		        'toolkits':['toolkits','PETSc options for each solution'],\ +		        'cluster':['generic','cluster parameters (number of cpus...)'],\ +		        'balancethickness':['balancethickness','parameters for balancethickness solution'],\ +		        'stressbalance':['stressbalance','parameters for stressbalance solution'],\ +		        'groundingline':['groundingline','parameters for groundingline solution'],\ +		        'hydrology':['hydrologyshreve','parameters for hydrology solution'],\ +		        'masstransport':['masstransport','parameters for masstransport
solution'],\ +		        'thermal':['thermal','parameters for thermal solution'],\ +		        'steadystate':['steadystate','parameters for steadystate solution'],\ +		        'transient':['transient','parameters for transient solution'],\ +		        'calving':['calving','parameters for calving'],\ +		        'gia':['gia','parameters for gia model'],\ +		        'autodiff':['autodiff','automatic differentiation parameters'],\ +		        'flaim':['flaim','flaim parameters'],\ +		        'inversion':['inversion','parameters for inverse methods'],\ +		        'qmu':['qmu','dakota properties'],\ +		        'outputdefinition':['outputdefinition','output definition'],\ +		        'results':['results','model results'],\ +		        'radaroverlay':['radaroverlay','radar image for plot overlay'],\ +		        'miscellaneous':['miscellaneous','miscellaneous fields'],\ +		        'private':['private','...']} +	# }}} + +	def __repr__(obj): #{{{ +		string = "Model Description" + for i,mod in enumerate(dict.keys(obj.properties())): +			tmp="%19s: %-22s -- %s" % (mod,"[%s,%s]" % ("1x1",obj.__dict__[mod].__class__.__name__),obj.properties()[mod][1]) + string="\n".join([string, tmp]) +		return string +	# }}} + +	def checkmessage(self,string):    # {{{ +		print("model not consistent: "+string) +		self.private.isconsistent=False +		return self +	# }}} + +	def extract(md,area):    # {{{ +		""" +		extract - extract a model according to an Argus contour or flag list + +		This routine extracts a submodel from a bigger model with respect to a given contour +		md must be followed by the corresponding exp file or flags list +		It can either be a domain file (argus type, .exp extension), or an array of element flags. +		If the user wants every element outside the domain to be +		extracted, add '~' to the name of the domain file (ex: '~HO.exp') +		an empty string '' will be considered as an empty domain +		a string 'all' will be considered as the entire domain + +		Usage: +		md2=extract(md,area) + +		Examples: +		md2=extract(md,'Domain.exp') + +		See also: EXTRUDE, COLLAPSE +		""" + +		#copy model +		md1=copy.deepcopy(md) + +		#get elements that are inside area +		flag_elem=FlagElements(md1,area) +		if not numpy.any(flag_elem): +			raise RuntimeError("extracted model is empty") + +		#kick out all elements with 3 dirichlets +		spc_elem=numpy.nonzero(numpy.logical_not(flag_elem))[0] +		spc_node=numpy.unique(md1.mesh.elements[spc_elem,:])-1 +		flag=numpy.ones(md1.mesh.numberofvertices) +		flag[spc_node]=0 +		pos=numpy.nonzero(numpy.logical_not(numpy.sum(flag[md1.mesh.elements-1],axis=1)))[0] +		flag_elem[pos]=0 + +		#extracted elements and nodes lists +		pos_elem=numpy.nonzero(flag_elem)[0] +		pos_node=numpy.unique(md1.mesh.elements[pos_elem,:])-1 + +		#keep track of some fields +		numberofvertices1=md1.mesh.numberofvertices +		numberofelements1=md1.mesh.numberofelements +		numberofvertices2=numpy.size(pos_node) +		numberofelements2=numpy.size(pos_elem) +		flag_node=numpy.zeros(numberofvertices1) +		flag_node[pos_node]=1 + +		#Create Pelem and Pnode (transform old nodes in new nodes and same thing for the elements) +		Pelem=numpy.zeros(numberofelements1,int) +		Pelem[pos_elem]=numpy.arange(1,numberofelements2+1) +		Pnode=numpy.zeros(numberofvertices1,int) +		Pnode[pos_node]=numpy.arange(1,numberofvertices2+1) + +		#renumber the elements (some nodes won't exist anymore) +		elements_1=copy.deepcopy(md1.mesh.elements) +		elements_2=elements_1[pos_elem,:] +		elements_2[:,0]=Pnode[elements_2[:,0]-1] +		elements_2[:,1]=Pnode[elements_2[:,1]-1] +		elements_2[:,2]=Pnode[elements_2[:,2]-1] +		if md1.mesh.__class__.__name__=='mesh3dprisms': +			elements_2[:,3]=Pnode[elements_2[:,3]-1] +
elements_2[:,4]=Pnode[elements_2[:,4]-1] + elements_2[:,5]=Pnode[elements_2[:,5]-1] + + #OK, now create the new model! + #take every field from model + md2=copy.deepcopy(md1) + + #automatically modify fields + #loop over model fields + model_fields=vars(md1) + for fieldi in model_fields: + #get field + field=getattr(md1,fieldi) + fieldsize=numpy.shape(field) + if hasattr(field,'__dict__') and not m.ismember(fieldi,['results'])[0]: #recursive call + object_fields=vars(field) + for fieldj in object_fields: + #get field + field=getattr(getattr(md1,fieldi),fieldj) + fieldsize=numpy.shape(field) + if len(fieldsize): + #size = number of nodes * n + if fieldsize[0]==numberofvertices1: + setattr(getattr(md2,fieldi),fieldj,field[pos_node]) + elif fieldsize[0]==numberofvertices1+1: + setattr(getattr(md2,fieldi),fieldj,numpy.vstack((field[pos_node],field[-1,:]))) + #size = number of elements * n + elif fieldsize[0]==numberofelements1: + setattr(getattr(md2,fieldi),fieldj,field[pos_elem]) + else: + if len(fieldsize): + #size = number of nodes * n + if fieldsize[0]==numberofvertices1: + setattr(md2,fieldi,field[pos_node]) + elif fieldsize[0]==numberofvertices1+1: + setattr(md2,fieldi,numpy.hstack((field[pos_node],field[-1,:]))) + #size = number of elements * n + elif fieldsize[0]==numberofelements1: + setattr(md2,fieldi,field[pos_elem]) + + #modify some specific fields + + #Mesh + md2.mesh.numberofelements=numberofelements2 + md2.mesh.numberofvertices=numberofvertices2 + md2.mesh.elements=elements_2 + + #mesh.uppervertex mesh.lowervertex + if md1.mesh.__class__.__name__=='mesh3dprisms': + md2.mesh.uppervertex=md1.mesh.uppervertex[pos_node] + pos=numpy.nonzero(numpy.logical_not(md2.mesh.uppervertex==-1))[0] + md2.mesh.uppervertex[pos]=Pnode[md2.mesh.uppervertex[pos]-1] + + md2.mesh.lowervertex=md1.mesh.lowervertex[pos_node] + pos=numpy.nonzero(numpy.logical_not(md2.mesh.lowervertex==-1))[0] + md2.mesh.lowervertex[pos]=Pnode[md2.mesh.lowervertex[pos]-1] + + md2.mesh.upperelements=md1.mesh.upperelements[pos_elem] + pos=numpy.nonzero(numpy.logical_not(md2.mesh.upperelements==-1))[0] + md2.mesh.upperelements[pos]=Pelem[md2.mesh.upperelements[pos]-1] + + md2.mesh.lowerelements=md1.mesh.lowerelements[pos_elem] + pos=numpy.nonzero(numpy.logical_not(md2.mesh.lowerelements==-1))[0] + md2.mesh.lowerelements[pos]=Pelem[md2.mesh.lowerelements[pos]-1] + + #Initial 2d mesh + if md1.mesh.__class__.__name__=='mesh3dprisms': + flag_elem_2d=flag_elem[numpy.arange(0,md1.mesh.numberofelements2d)] + pos_elem_2d=numpy.nonzero(flag_elem_2d)[0] + flag_node_2d=flag_node[numpy.arange(0,md1.mesh.numberofvertices2d)] + pos_node_2d=numpy.nonzero(flag_node_2d)[0] + + md2.mesh.numberofelements2d=numpy.size(pos_elem_2d) + md2.mesh.numberofvertices2d=numpy.size(pos_node_2d) + md2.mesh.elements2d=md1.mesh.elements2d[pos_elem_2d,:] + md2.mesh.elements2d[:,0]=Pnode[md2.mesh.elements2d[:,0]-1] + md2.mesh.elements2d[:,1]=Pnode[md2.mesh.elements2d[:,1]-1] + md2.mesh.elements2d[:,2]=Pnode[md2.mesh.elements2d[:,2]-1] + + md2.mesh.x2d=md1.mesh.x[pos_node_2d] + md2.mesh.y2d=md1.mesh.y[pos_node_2d] + + #Edges + if m.strcmp(md.mesh.domaintype(),'2Dhorizontal'): + if numpy.ndim(md2.mesh.edges)>1 and numpy.size(md2.mesh.edges,axis=1)>1: + #do not use ~isnan because there are some numpy.nans... 
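+				#edges layout (inferred from the renumbering code below): columns are [vertex1, vertex2, element1, element2], 1-based, with -1 flagging a missing neighbour element.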
+ #renumber first two columns + pos=numpy.nonzero(md2.mesh.edges[:,3]!=-1)[0] + md2.mesh.edges[: ,0]=Pnode[md2.mesh.edges[:,0]-1] + md2.mesh.edges[: ,1]=Pnode[md2.mesh.edges[:,1]-1] + md2.mesh.edges[: ,2]=Pelem[md2.mesh.edges[:,2]-1] + md2.mesh.edges[pos,3]=Pelem[md2.mesh.edges[pos,3]-1] + #remove edges when the 2 vertices are not in the domain. + md2.mesh.edges=md2.mesh.edges[numpy.nonzero(numpy.logical_and(md2.mesh.edges[:,0],md2.mesh.edges[:,1]))[0],:] + #Replace all zeros by -1 in the last two columns + pos=numpy.nonzero(md2.mesh.edges[:,2]==0)[0] + md2.mesh.edges[pos,2]=-1 + pos=numpy.nonzero(md2.mesh.edges[:,3]==0)[0] + md2.mesh.edges[pos,3]=-1 + #Invert -1 on the third column with last column (Also invert first two columns!!) + pos=numpy.nonzero(md2.mesh.edges[:,2]==-1)[0] + md2.mesh.edges[pos,2]=md2.mesh.edges[pos,3] + md2.mesh.edges[pos,3]=-1 + values=md2.mesh.edges[pos,1] + md2.mesh.edges[pos,1]=md2.mesh.edges[pos,0] + md2.mesh.edges[pos,0]=values + #Finally remove edges that do not belong to any element + pos=numpy.nonzero(numpy.logical_and(md2.mesh.edges[:,1]==-1,md2.mesh.edges[:,2]==-1))[0] + md2.mesh.edges=numpy.delete(md2.mesh.edges,pos,axis=0) + + #Penalties + if numpy.any(numpy.logical_not(numpy.isnan(md2.stressbalance.vertex_pairing))): + for i in range(numpy.size(md1.stressbalance.vertex_pairing,axis=0)): + md2.stressbalance.vertex_pairing[i,:]=Pnode[md1.stressbalance.vertex_pairing[i,:]] + md2.stressbalance.vertex_pairing=md2.stressbalance.vertex_pairing[numpy.nonzero(md2.stressbalance.vertex_pairing[:,0])[0],:] + if numpy.any(numpy.logical_not(numpy.isnan(md2.masstransport.vertex_pairing))): + for i in range(numpy.size(md1.masstransport.vertex_pairing,axis=0)): + md2.masstransport.vertex_pairing[i,:]=Pnode[md1.masstransport.vertex_pairing[i,:]] + md2.masstransport.vertex_pairing=md2.masstransport.vertex_pairing[numpy.nonzero(md2.masstransport.vertex_pairing[:,0])[0],:] + + #recreate segments + if md1.mesh.__class__.__name__=='mesh2d': + [md2.mesh.vertexconnectivity]=NodeConnectivity(md2.mesh.elements,md2.mesh.numberofvertices) + [md2.mesh.elementconnectivity]=ElementConnectivity(md2.mesh.elements,md2.mesh.vertexconnectivity) + md2.mesh.segments=contourenvelope(md2) + md2.mesh.vertexonboundary=numpy.zeros(numberofvertices2,bool) + md2.mesh.vertexonboundary[md2.mesh.segments[:,0:2]-1]=True + else: + #First do the connectivity for the contourenvelope in 2d + [md2.mesh.vertexconnectivity]=NodeConnectivity(md2.mesh.elements2d,md2.mesh.numberofvertices2d) + [md2.mesh.elementconnectivity]=ElementConnectivity(md2.mesh.elements2d,md2.mesh.vertexconnectivity) + segments=contourenvelope(md2) + md2.mesh.vertexonboundary=numpy.zeros(numberofvertices2/md2.mesh.numberoflayers,bool) + md2.mesh.vertexonboundary[segments[:,0:2]-1]=True + md2.mesh.vertexonboundary=numpy.tile(md2.mesh.vertexonboundary,md2.mesh.numberoflayers) + #Then do it for 3d as usual + [md2.mesh.vertexconnectivity]=NodeConnectivity(md2.mesh.elements,md2.mesh.numberofvertices) + [md2.mesh.elementconnectivity]=ElementConnectivity(md2.mesh.elements,md2.mesh.vertexconnectivity) + + #Boundary conditions: Dirichlets on new boundary + #Catch the elements that have not been extracted + orphans_elem=numpy.nonzero(numpy.logical_not(flag_elem))[0] + orphans_node=numpy.unique(md1.mesh.elements[orphans_elem,:])-1 + #Figure out which node are on the boundary between md2 and md1 + nodestoflag1=numpy.intersect1d(orphans_node,pos_node) + nodestoflag2=Pnode[nodestoflag1].astype(int)-1 + if numpy.size(md1.stressbalance.spcvx)>1 and 
numpy.size(md1.stressbalance.spcvy)>1 and numpy.size(md1.stressbalance.spcvz)>1: + if numpy.size(md1.inversion.vx_obs)>1 and numpy.size(md1.inversion.vy_obs)>1: +				md2.stressbalance.spcvx[nodestoflag2]=md2.inversion.vx_obs[nodestoflag2] +				md2.stressbalance.spcvy[nodestoflag2]=md2.inversion.vy_obs[nodestoflag2] +			else: +				md2.stressbalance.spcvx[nodestoflag2]=numpy.nan +				md2.stressbalance.spcvy[nodestoflag2]=numpy.nan +				print("\n!! extract warning: spc values should be checked !!\n\n") +			#put 0 for vz +			md2.stressbalance.spcvz[nodestoflag2]=0 +		if numpy.any(numpy.logical_not(numpy.isnan(md1.thermal.spctemperature))): +			md2.thermal.spctemperature[nodestoflag2,0]=1 + +		#Results fields +		if md1.results: +			md2.results=results() + for solutionfield,field in list(md1.results.__dict__.items()): + if isinstance(field,list): +					setattr(md2.results,solutionfield,[]) +					#get time step + for i,fieldi in enumerate(field): + if isinstance(fieldi,results) and fieldi: +							getattr(md2.results,solutionfield).append(results()) +							fieldr=getattr(md2.results,solutionfield)[i] +							#get subfields + for solutionsubfield,subfield in list(fieldi.__dict__.items()): + if numpy.size(subfield)==numberofvertices1: +									setattr(fieldr,solutionsubfield,subfield[pos_node]) +								elif numpy.size(subfield)==numberofelements1: +									setattr(fieldr,solutionsubfield,subfield[pos_elem]) +								else: +									setattr(fieldr,solutionsubfield,subfield) +						else: +							getattr(md2.results,solutionfield).append(None) +				elif isinstance(field,results): +					setattr(md2.results,solutionfield,results()) + if isinstance(field,results) and field: +						fieldr=getattr(md2.results,solutionfield) +						#get subfields + for solutionsubfield,subfield in list(field.__dict__.items()): + if numpy.size(subfield)==numberofvertices1: +								setattr(fieldr,solutionsubfield,subfield[pos_node]) +							elif numpy.size(subfield)==numberofelements1: +								setattr(fieldr,solutionsubfield,subfield[pos_elem]) +							else: +								setattr(fieldr,solutionsubfield,subfield) + +		#Keep track of pos_node and pos_elem +		md2.mesh.extractedvertices=pos_node+1 +		md2.mesh.extractedelements=pos_elem+1 +		return md2 +	# }}} + +	def extrude(md,*args):    # {{{ +		""" +		EXTRUDE - vertically extrude a 2d mesh + +		vertically extrude a 2d mesh and create corresponding 3d mesh.
+		The vertical distribution can: +		 - follow a polynomial law +		 - follow two polynomial laws, one for the lower part and one for the upper part of the mesh +		 - be described by a list of coefficients (between 0 and 1) + + +		Usage: +		md=extrude(md,numlayers,extrusionexponent) +		md=extrude(md,numlayers,lowerexponent,upperexponent) +		md=extrude(md,listofcoefficients) + +		Example: +		md=extrude(md,15,1.3) +		md=extrude(md,15,1.3,1.2) +		md=extrude(md,[0.,0.2,0.5,0.7,0.9,0.95,1.]) + +		See also: MODELEXTRACT, COLLAPSE +		""" + +		#some checks on list of arguments +		if len(args)>3 or len(args)<1: +			raise RuntimeError("extrude error message") + +		#Extrude the mesh +		if len(args)==1:    #list of coefficients +			clist=args[0] +			if any(c<0 or c>1 for c in clist): +				raise TypeError("extrusioncoefficients must be between 0 and 1") +			clist.extend([0.,1.]) +			#deduplicate first, then sort (set() does not preserve order) +			extrusionlist=sorted(set(clist)) +			numlayers=len(extrusionlist) + +		elif len(args)==2:    #one polynomial law +			if args[1]<=0: +				raise TypeError("extrusionexponent must be >0") +			numlayers=args[0] +			extrusionlist=(numpy.arange(0.,float(numlayers-1)+1.,1.)/float(numlayers-1))**args[1] + +		elif len(args)==3:    #two polynomial laws +			numlayers=args[0] +			lowerexp=args[1] +			upperexp=args[2] + +			if args[1]<=0 or args[2]<=0: +				raise TypeError("lower and upper extrusionexponents must be >0") + +			lowerextrusionlist=(numpy.arange(0.,1.+2./float(numlayers-1),2./float(numlayers-1)))**lowerexp/2. +			upperextrusionlist=(numpy.arange(0.,1.+2./float(numlayers-1),2./float(numlayers-1)))**upperexp/2. +			extrusionlist=numpy.unique(numpy.concatenate((lowerextrusionlist,1.-upperextrusionlist))) + +		if numlayers<2: +			raise TypeError("number of layers should be at least 2") +		if md.mesh.__class__.__name__=='mesh3dprisms': +			raise TypeError("Cannot extrude a 3d mesh (extrude cannot be called more than once)") + +		#Initialize with the 2d mesh +		mesh2d = md.mesh +		md.mesh=mesh3dprisms() +		md.mesh.x                           = mesh2d.x +		md.mesh.y                           = mesh2d.y +		md.mesh.elements                    = mesh2d.elements +		md.mesh.numberofelements            = mesh2d.numberofelements +		md.mesh.numberofvertices            = mesh2d.numberofvertices + +		md.mesh.lat                         = mesh2d.lat +		md.mesh.long                        = mesh2d.long +		md.mesh.epsg                        = mesh2d.epsg + +		md.mesh.vertexonboundary            = mesh2d.vertexonboundary +		md.mesh.vertexconnectivity          = mesh2d.vertexconnectivity +		md.mesh.elementconnectivity         = mesh2d.elementconnectivity +		md.mesh.average_vertex_connectivity = mesh2d.average_vertex_connectivity + +		md.mesh.extractedvertices           = mesh2d.extractedvertices +		md.mesh.extractedelements           = mesh2d.extractedelements + +		x3d=numpy.empty((0)) +		y3d=numpy.empty((0)) +		z3d=numpy.empty((0))    #the lower node is on the bed +		thickness3d=md.geometry.thickness    #thickness and bed for these nodes +		bed3d=md.geometry.base + +		#Create the new layers +		for i in range(numlayers): +			x3d=numpy.concatenate((x3d,md.mesh.x)) +			y3d=numpy.concatenate((y3d,md.mesh.y)) +			#nodes are distributed between bed and surface accordingly to the given exponent +			z3d=numpy.concatenate((z3d,(bed3d+thickness3d*extrusionlist[i]).reshape(-1))) +		number_nodes3d=numpy.size(x3d)    #number of 3d nodes for the non extruded part of the mesh + +		#Extrude elements +		elements3d=numpy.empty((0,6),int) +		for i in range(numlayers-1): +			elements3d=numpy.vstack((elements3d,numpy.hstack((md.mesh.elements+i*md.mesh.numberofvertices,md.mesh.elements+(i+1)*md.mesh.numberofvertices))))    #Create the elements of the 3d mesh for the non extruded part +		number_el3d=numpy.size(elements3d,axis=0)    #number of 3d elements for the non extruded part of the mesh +
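+		#vertex numbering sketch: copy k (k=0,...,numlayers-1) of 2d vertex i is 3d vertex i+k*md.mesh.numberofvertices, so the loop above pairs two consecutive layers into 6-vertex prisms.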
+	#Keep track of lower and upper nodes
+	lowervertex=-1*numpy.ones(number_nodes3d,int)
+	uppervertex=-1*numpy.ones(number_nodes3d,int)
+	lowervertex[md.mesh.numberofvertices:]=numpy.arange(1,(numlayers-1)*md.mesh.numberofvertices+1)
+	uppervertex[:(numlayers-1)*md.mesh.numberofvertices]=numpy.arange(md.mesh.numberofvertices+1,number_nodes3d+1)
+	md.mesh.lowervertex=lowervertex
+	md.mesh.uppervertex=uppervertex
+
+	#same for lower and upper elements
+	lowerelements=-1*numpy.ones(number_el3d,int)
+	upperelements=-1*numpy.ones(number_el3d,int)
+	lowerelements[md.mesh.numberofelements:]=numpy.arange(1,(numlayers-2)*md.mesh.numberofelements+1)
+	upperelements[:(numlayers-2)*md.mesh.numberofelements]=numpy.arange(md.mesh.numberofelements+1,(numlayers-1)*md.mesh.numberofelements+1)
+	md.mesh.lowerelements=lowerelements
+	md.mesh.upperelements=upperelements
+
+	#Save old mesh
+	md.mesh.x2d=md.mesh.x
+	md.mesh.y2d=md.mesh.y
+	md.mesh.elements2d=md.mesh.elements
+	md.mesh.numberofelements2d=md.mesh.numberofelements
+	md.mesh.numberofvertices2d=md.mesh.numberofvertices
+
+	#Build global 3d mesh
+	md.mesh.elements=elements3d
+	md.mesh.x=x3d
+	md.mesh.y=y3d
+	md.mesh.z=z3d
+	md.mesh.numberofelements=number_el3d
+	md.mesh.numberofvertices=number_nodes3d
+	md.mesh.numberoflayers=numlayers
+
+	#Ok, now deal with the other fields from the 2d mesh:
+
+	#bed info and surface info
+	md.mesh.vertexonbase=project3d(md,'vector',numpy.ones(md.mesh.numberofvertices2d,bool),'type','node','layer',1)
+	md.mesh.vertexonsurface=project3d(md,'vector',numpy.ones(md.mesh.numberofvertices2d,bool),'type','node','layer',md.mesh.numberoflayers)
+	md.mesh.vertexonboundary=project3d(md,'vector',md.mesh.vertexonboundary,'type','node')
+
+	#lat long
+	md.mesh.lat=project3d(md,'vector',md.mesh.lat,'type','node')
+	md.mesh.long=project3d(md,'vector',md.mesh.long,'type','node')
+
+	md.geometry.extrude(md)
+	md.friction.extrude(md)
+	md.inversion.extrude(md)
+	md.smb.extrude(md)
+	md.initialization.extrude(md)
+	md.flowequation.extrude(md)
+
+	md.stressbalance.extrude(md)
+	md.thermal.extrude(md)
+	md.masstransport.extrude(md)
+
+	#hydrology and calving variables
+	md.hydrology.extrude(md)
+	md.calving.extrude(md)
+
+	#connectivity
+	md.mesh.elementconnectivity=numpy.tile(md.mesh.elementconnectivity,(numlayers-1,1))
+	md.mesh.elementconnectivity[numpy.nonzero(md.mesh.elementconnectivity==0)]=-sys.maxsize-1
+	if not numpy.isnan(md.mesh.elementconnectivity).all():
+		for i in range(1,numlayers-1):
+			md.mesh.elementconnectivity[i*md.mesh.numberofelements2d:(i+1)*md.mesh.numberofelements2d,:] \
+				=md.mesh.elementconnectivity[i*md.mesh.numberofelements2d:(i+1)*md.mesh.numberofelements2d,:]+md.mesh.numberofelements2d
+		md.mesh.elementconnectivity[numpy.nonzero(md.mesh.elementconnectivity<0)]=0
+
+	md.materials.extrude(md)
+	md.damage.extrude(md)
+	md.gia.extrude(md)
+	md.mask.extrude(md)
+	md.qmu.extrude(md)
+	md.basalforcings.extrude(md)
+
+	#increase connectivity if less than 25:
+	if md.mesh.average_vertex_connectivity<=25:
+		md.mesh.average_vertex_connectivity=100
+
+	return md
+# }}}
+def collapse(md): #{{{
+	'''
+	collapses a 3d mesh into a 2d mesh
+
+	This routine collapses a 3d model into a 2d model and collapses all
+	the fields of the 3d model by taking their depth-averaged values
+
+	Usage:
+		md=collapse(md)
+	'''
+
+	#Check that the model is really a 3d model
+	if md.mesh.domaintype().lower() != '3d':
+		raise Exception("only a 3D model can be collapsed")
+
+	#drag is limited to nodes that are on the bedrock.
+	md.friction.coefficient=project2d(md,md.friction.coefficient,1)
+
+	#p and q (same deal, except for elements that are on the bedrock)
+	md.friction.p=project2d(md,md.friction.p,1)
+	md.friction.q=project2d(md,md.friction.q,1)
+
+	#observations
+	if not numpy.isnan(md.inversion.vx_obs).all(): md.inversion.vx_obs=project2d(md,md.inversion.vx_obs,md.mesh.numberoflayers)
+	if not numpy.isnan(md.inversion.vy_obs).all(): md.inversion.vy_obs=project2d(md,md.inversion.vy_obs,md.mesh.numberoflayers)
+	if not numpy.isnan(md.inversion.vel_obs).all(): md.inversion.vel_obs=project2d(md,md.inversion.vel_obs,md.mesh.numberoflayers)
+	if not numpy.isnan(md.inversion.cost_functions_coefficients).all(): md.inversion.cost_functions_coefficients=project2d(md,md.inversion.cost_functions_coefficients,md.mesh.numberoflayers)
+	if isinstance(md.inversion.min_parameters,numpy.ndarray):
+		if md.inversion.min_parameters.size>1: md.inversion.min_parameters=project2d(md,md.inversion.min_parameters,md.mesh.numberoflayers)
+	if isinstance(md.inversion.max_parameters,numpy.ndarray):
+		if md.inversion.max_parameters.size>1: md.inversion.max_parameters=project2d(md,md.inversion.max_parameters,md.mesh.numberoflayers)
+	if not numpy.isnan(md.smb.mass_balance).all():
+		md.smb.mass_balance=project2d(md,md.smb.mass_balance,md.mesh.numberoflayers)
+
+	if not numpy.isnan(md.balancethickness.thickening_rate).all(): md.balancethickness.thickening_rate=project2d(md,md.balancethickness.thickening_rate,md.mesh.numberoflayers)
+
+	#results
+	if not numpy.isnan(md.initialization.vx).all(): md.initialization.vx=DepthAverage(md,md.initialization.vx)
+	if not numpy.isnan(md.initialization.vy).all(): md.initialization.vy=DepthAverage(md,md.initialization.vy)
+	if not numpy.isnan(md.initialization.vz).all(): md.initialization.vz=DepthAverage(md,md.initialization.vz)
+	if not numpy.isnan(md.initialization.vel).all(): md.initialization.vel=DepthAverage(md,md.initialization.vel)
+	if not numpy.isnan(md.initialization.temperature).all(): md.initialization.temperature=DepthAverage(md,md.initialization.temperature)
+	if not numpy.isnan(md.initialization.pressure).all(): md.initialization.pressure=project2d(md,md.initialization.pressure,1)
+	if not numpy.isnan(md.initialization.sediment_head).all(): md.initialization.sediment_head=project2d(md,md.initialization.sediment_head,1)
+	if not numpy.isnan(md.initialization.epl_head).all(): md.initialization.epl_head=project2d(md,md.initialization.epl_head,1)
+	if not numpy.isnan(md.initialization.epl_thickness).all(): md.initialization.epl_thickness=project2d(md,md.initialization.epl_thickness,1)
+
+	#gia
+	if not numpy.isnan(md.gia.mantle_viscosity).all(): md.gia.mantle_viscosity=project2d(md,md.gia.mantle_viscosity,1)
+	if not numpy.isnan(md.gia.lithosphere_thickness).all(): md.gia.lithosphere_thickness=project2d(md,md.gia.lithosphere_thickness,1)
+
+	#element types
+	if not numpy.isnan(md.flowequation.element_equation).all():
+		md.flowequation.element_equation=project2d(md,md.flowequation.element_equation,1)
+		md.flowequation.vertex_equation=project2d(md,md.flowequation.vertex_equation,1)
+		md.flowequation.borderSSA=project2d(md,md.flowequation.borderSSA,1)
+		md.flowequation.borderHO=project2d(md,md.flowequation.borderHO,1)
+		md.flowequation.borderFS=project2d(md,md.flowequation.borderFS,1)
+
+	# Hydrologydc variables
+	if hasattr(md.hydrology,'hydrologydc'):
+		md.hydrology.spcsediment_head=project2d(md,md.hydrology.spcsediment_head,1)
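The project2d calls throughout collapse rely on the layer-by-layer vertex numbering that extrude sets up, where vertex k of layer L occupies index (L-1)*numberofvertices2d+k, so pulling out one layer of a nodal field amounts to a reshape and a slice. A minimal sketch of that idea follows, assuming only numpy; project2d_sketch is a hypothetical name, and the real project2d also handles element fields and time series:

    import numpy

    def project2d_sketch(field3d, nv2d, layer):
        # slice out one horizontal layer of an extruded nodal field
        # (1-based: layer 1 is the bed, layer numberoflayers is the surface)
        return field3d.reshape(-1, nv2d)[layer-1]

    # example: 4 layers of 3 vertices, values encode the layer number
    field = numpy.repeat(numpy.arange(1., 5.), 3)
    print(project2d_sketch(field, 3, 1))   # bed layer     -> [1. 1. 1.]
    print(project2d_sketch(field, 3, 4))   # surface layer -> [4. 4. 4.]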
md.hydrology.mask_eplactive_node=project2d(md,md.hydrology.mask_eplactive_node,1) + md.hydrology.sediment_transmitivity=project2d(md,md.hydrology.sediment_transmitivity,1) + md.hydrology.basal_moulin_input=project2d(md,md.hydrology.basal_moulin_input,1) + if md.hydrology.isefficientlayer == 1: + md.hydrology.spcepl_head=project2d(md,md.hydrology.spcepl_head,1) + + #boundary conditions + md.stressbalance.spcvx=project2d(md,md.stressbalance.spcvx,md.mesh.numberoflayers) + md.stressbalance.spcvy=project2d(md,md.stressbalance.spcvy,md.mesh.numberoflayers) + md.stressbalance.spcvz=project2d(md,md.stressbalance.spcvz,md.mesh.numberoflayers) + md.stressbalance.referential=project2d(md,md.stressbalance.referential,md.mesh.numberoflayers) + md.stressbalance.loadingforce=project2d(md,md.stressbalance.loadingforce,md.mesh.numberoflayers) + md.masstransport.spcthickness=project2d(md,md.masstransport.spcthickness,md.mesh.numberoflayers) + if not numpy.isnan(md.damage.spcdamage).all(): md.damage.spcdamage=project2d(md,md.damage.spcdamage,md.mesh.numberoflayers-1) + md.thermal.spctemperature=project2d(md,md.thermal.spctemperature,md.mesh.numberoflayers-1) + + #materials + md.materials.rheology_B=DepthAverage(md,md.materials.rheology_B) + md.materials.rheology_n=project2d(md,md.materials.rheology_n,1) + + #damage: + if md.damage.isdamage: + md.damage.D=DepthAverage(md,md.damage.D) + + #special for thermal modeling: + md.basalforcings.groundedice_melting_rate=project2d(md,md.basalforcings.groundedice_melting_rate,1) + md.basalforcings.floatingice_melting_rate=project2d(md,md.basalforcings.floatingice_melting_rate,1) + md.basalforcings.geothermalflux=project2d(md,md.basalforcings.geothermalflux,1) #bedrock only gets geothermal flux + + #update of connectivity matrix + md.mesh.average_vertex_connectivity=25 + + #Collapse the mesh + nodes2d=md.mesh.numberofvertices2d + elements2d=md.mesh.numberofelements2d + + #parameters + md.geometry.surface=project2d(md,md.geometry.surface,1) + md.geometry.thickness=project2d(md,md.geometry.thickness,1) + md.geometry.base=project2d(md,md.geometry.base,1) + if isinstance(md.geometry.bed,numpy.ndarray): + md.geometry.bed=project2d(md,md.geometry.bed,1) + md.mask.groundedice_levelset=project2d(md,md.mask.groundedice_levelset,1) + md.mask.ice_levelset=project2d(md,md.mask.ice_levelset,1) + + #lat long + if isinstance(md.mesh.lat,numpy.ndarray): + if md.mesh.lat.size==md.mesh.numberofvertices: md.mesh.lat=project2d(md,md.mesh.lat,1) + if isinstance(md.mesh.long,numpy.ndarray): + if md.mesh.long.size==md.mesh.numberofvertices: md.mesh.long=project2d(md,md.mesh.long,1) + + #Initialize with the 2d mesh + mesh=mesh2d() + mesh.x=md.mesh.x2d + mesh.y=md.mesh.y2d + mesh.numberofvertices=md.mesh.numberofvertices2d + mesh.numberofelements=md.mesh.numberofelements2d + mesh.elements=md.mesh.elements2d + if not numpy.isnan(md.mesh.vertexonboundary).all(): mesh.vertexonboundary=project2d(md,md.mesh.vertexonboundary,1) + if not numpy.isnan(md.mesh.elementconnectivity).all(): mesh.elementconnectivity=project2d(md,md.mesh.elementconnectivity,1) + md.mesh=mesh + + return md + +#}}} Index: ../trunk-jpl/src/py3/classes/settings.py =================================================================== --- ../trunk-jpl/src/py3/classes/settings.py (revision 0) +++ ../trunk-jpl/src/py3/classes/settings.py (revision 19895) @@ -0,0 +1,80 @@ +from fielddisplay import fielddisplay +from EnumDefinitions import * +from checkfield import checkfield +from WriteData import WriteData + +class settings(object): + 
""" + SETTINGS class definition + + Usage: + settings=settings(); + """ + + def __init__(self): # {{{ + self.results_on_nodes = 0 + self.io_gather = 0 + self.lowmem = 0 + self.output_frequency = 0 + self.recording_frequency = 0 + self.waitonlock = 0 + + #set defaults + self.setdefaultparameters() + + #}}} + def __repr__(self): # {{{ + string=" general settings parameters:" + + string="%s\n%s"%(string,fielddisplay(self,"results_on_nodes","results are output for all the nodes of each element")) + string="%s\n%s"%(string,fielddisplay(self,"io_gather","I/O gathering strategy for result outputs (default 1)")) + string="%s\n%s"%(string,fielddisplay(self,"lowmem","is the memory limited ? (0 or 1)")) + string="%s\n%s"%(string,fielddisplay(self,"output_frequency","frequency at which results are saved in all solutions with multiple time_steps")) + string="%s\n%s"%(string,fielddisplay(self,"recording_frequency","frequency at which the runs are being recorded, allowing for a restart")) + string="%s\n%s"%(string,fielddisplay(self,"waitonlock","maximum number of minutes to wait for batch results, or return 0")) + return string + #}}} + def setdefaultparameters(self): # {{{ + + #are we short in memory ? (0 faster but requires more memory) + self.lowmem=0 + + #i/o: + self.io_gather=1 + + #results frequency by default every step + self.output_frequency=1 + + #checkpoints frequency, by default never: + self.recording_frequency=0 + + + #this option can be activated to load automatically the results + #onto the model after a parallel run by waiting for the lock file + #N minutes that is generated once the solution has converged + #0 to deactivate + self.waitonlock=2**31-1 + + return self + #}}} + def checkconsistency(self,md,solution,analyses): # {{{ + md = checkfield(md,'fieldname','settings.results_on_nodes','numel',[1],'values',[0,1]) + md = checkfield(md,'fieldname','settings.io_gather','numel',[1],'values',[0,1]) + md = checkfield(md,'fieldname','settings.lowmem','numel',[1],'values',[0,1]) + md = checkfield(md,'fieldname','settings.output_frequency','numel',[1],'>=',1) + md = checkfield(md,'fieldname','settings.recording_frequency','numel',[1],'>=',0) + md = checkfield(md,'fieldname','settings.waitonlock','numel',[1]) + + return md + # }}} + def marshall(self,md,fid): # {{{ + WriteData(fid,'object',self,'fieldname','results_on_nodes','format','Boolean') + WriteData(fid,'object',self,'fieldname','io_gather','format','Boolean') + WriteData(fid,'object',self,'fieldname','lowmem','format','Boolean') + WriteData(fid,'object',self,'fieldname','output_frequency','format','Integer') + WriteData(fid,'object',self,'fieldname','recording_frequency','format','Integer') + if self.waitonlock>0: + WriteData(fid,'enum',SettingsWaitonlockEnum(),'data',True,'format','Boolean'); + else: + WriteData(fid,'enum',SettingsWaitonlockEnum(),'data',False,'format','Boolean'); + # }}} Index: ../trunk-jpl/src/py3/classes/autodiff.py =================================================================== --- ../trunk-jpl/src/py3/classes/autodiff.py (revision 0) +++ ../trunk-jpl/src/py3/classes/autodiff.py (revision 19895) @@ -0,0 +1,215 @@ +import numpy +from dependent import dependent +from independent import independent +from fielddisplay import fielddisplay +from EnumDefinitions import * +from checkfield import checkfield +from WriteData import WriteData + +class autodiff(object): + """ + AUTODIFF class definition + + Usage: + autodiff=autodiff(); + """ + def __init__(self,*args): # {{{ + self.isautodiff = False + self.dependents = [] 
+		self.independents = []
+		self.driver       = 'fos_forward'
+		self.obufsize     = float('NaN')
+		self.lbufsize     = float('NaN')
+		self.cbufsize     = float('NaN')
+		self.tbufsize     = float('NaN')
+		self.gcTriggerMaxSize = float('NaN')
+		self.gcTriggerRatio   = float('NaN')
+		if not len(args):
+			self.setdefaultparameters()
+		else:
+			raise RuntimeError("constructor not supported")
+	# }}}
+	def __repr__(self): # {{{
+		s ="   automatic differentiation parameters:\n"
+
+		s+="%s\n" % fielddisplay(self,'isautodiff',"indicates if the automatic differentiation is activated")
+		s+="%s\n" % fielddisplay(self,'dependents',"list of dependent variables")
+		s+="%s\n" % fielddisplay(self,'independents',"list of independent variables")
+		s+="%s\n" % fielddisplay(self,'driver',"ADOLC driver ('fos_forward', 'fov_forward', 'fov_forward_all', 'fos_reverse', 'fov_reverse' or 'fov_reverse_all')")
+		s+="%s\n" % fielddisplay(self,'obufsize',"Number of operations per buffer (==OBUFSIZE in usrparms.h)")
+		s+="%s\n" % fielddisplay(self,'lbufsize',"Number of locations per buffer (==LBUFSIZE in usrparms.h)")
+		s+="%s\n" % fielddisplay(self,'cbufsize',"Number of values per buffer (==CBUFSIZE in usrparms.h)")
+		s+="%s\n" % fielddisplay(self,'tbufsize',"Number of taylors per buffer (==TBUFSIZE in usrparms.h)")
+		s+="%s\n" % fielddisplay(self,'gcTriggerRatio',"free location block sorting/consolidation triggered if the ratio between allocated and used locations exceeds gcTriggerRatio")
+		s+="%s\n" % fielddisplay(self,'gcTriggerMaxSize',"free location block sorting/consolidation triggered if the allocated locations exceed gcTriggerMaxSize")
+
+		return s
+	# }}}
+	def setdefaultparameters(self): # {{{
+
+		self.obufsize = 524288
+		self.lbufsize = 524288
+		self.cbufsize = 524288
+		self.tbufsize = 524288
+		self.gcTriggerRatio=2.0
+		self.gcTriggerMaxSize=65536
+		return self
+	# }}}
+	def checkconsistency(self,md,solution,analyses): # {{{
+
+		#Early return
+		if not self.isautodiff:
+			return md
+
+		md = checkfield(md,'fieldname','autodiff.obufsize','>=',524288)
+		md = checkfield(md,'fieldname','autodiff.lbufsize','>=',524288)
+		md = checkfield(md,'fieldname','autodiff.cbufsize','>=',524288)
+		md = checkfield(md,'fieldname','autodiff.tbufsize','>=',524288)
+		md = checkfield(md,'fieldname','autodiff.gcTriggerRatio','>=',2.0)
+		md = checkfield(md,'fieldname','autodiff.gcTriggerMaxSize','>=',2000000)
+
+		#Driver value:
+		md = checkfield(md,'fieldname','autodiff.driver','values',['fos_forward','fov_forward','fov_forward_all','fos_reverse','fov_reverse','fov_reverse_all'])
+
+		#go through our dependents and independents and check consistency:
+		for dep in self.dependents:
+			dep.checkconsistency(md,solution,analyses)
+		for i,indep in enumerate(self.independents):
+			indep.checkconsistency(md,i,solution,analyses,self.driver)
+
+		return md
+	# }}}
+	def marshall(self,md,fid): # {{{
+		WriteData(fid,'object',self,'fieldname','isautodiff','format','Boolean')
+		WriteData(fid,'object',self,'fieldname','driver','format','String')
+
+		#early return
+		if not self.isautodiff:
+			WriteData(fid,'data',False,'enum',AutodiffMassFluxSegmentsPresentEnum(),'format','Boolean')
+			WriteData(fid,'data',False,'enum',AutodiffKeepEnum(),'format','Boolean')
+			return
+
+		#buffer sizes {{{
+		WriteData(fid,'object',self,'fieldname','obufsize','format','Double');
+		WriteData(fid,'object',self,'fieldname','lbufsize','format','Double');
+		WriteData(fid,'object',self,'fieldname','cbufsize','format','Double');
+		WriteData(fid,'object',self,'fieldname','tbufsize','format','Double');
+		WriteData(fid,'object',self,'fieldname','gcTriggerRatio','format','Double');
+		WriteData(fid,'object',self,'fieldname','gcTriggerMaxSize','format','Double');
+		#}}}
+		#process dependent variables {{{
+		num_dependent_objects=len(self.dependents)
+		WriteData(fid,'data',num_dependent_objects,'enum',AutodiffNumDependentObjectsEnum(),'format','Integer')
+
+		if num_dependent_objects:
+			names=[]
+			types=numpy.zeros(num_dependent_objects)
+			indices=numpy.zeros(num_dependent_objects)
+
+			for i,dep in enumerate(self.dependents):
+				names.append(dep.name)
+				types[i]=dep.typetoscalar()
+				indices[i]=dep.index
+
+			WriteData(fid,'data',names,'enum',AutodiffDependentObjectNamesEnum(),'format','StringArray')
+			WriteData(fid,'data',types,'enum',AutodiffDependentObjectTypesEnum(),'format','IntMat','mattype',3)
+			WriteData(fid,'data',indices,'enum',AutodiffDependentObjectIndicesEnum(),'format','IntMat','mattype',3)
+		#}}}
+		#process independent variables {{{
+		num_independent_objects=len(self.independents)
+		WriteData(fid,'data',num_independent_objects,'enum',AutodiffNumIndependentObjectsEnum(),'format','Integer')
+
+		if num_independent_objects:
+			names=numpy.zeros(num_independent_objects)
+			types=numpy.zeros(num_independent_objects)
+
+			for i,indep in enumerate(self.independents):
+				names[i]=StringToEnum(indep.name)[0]
+				types[i]=indep.typetoscalar()
+
+			WriteData(fid,'data',names,'enum',AutodiffIndependentObjectNamesEnum(),'format','IntMat','mattype',3)
+			WriteData(fid,'data',types,'enum',AutodiffIndependentObjectTypesEnum(),'format','IntMat','mattype',3)
+		#}}}
+		#if driver is fos_forward, build index: {{{
+		if m.strcmpi(self.driver,'fos_forward'):
+			index=0
+
+			for indep in self.independents:
+				if not numpy.isnan(indep.fos_forward_index):
+					index+=indep.fos_forward_index
+					break
+				else:
+					if m.strcmpi(indep.type,'scalar'):
+						index+=1
+					else:
+						index+=indep.nods
+
+			index-=1    #get c-index numbering going
+			WriteData(fid,'data',index,'enum',AutodiffFosForwardIndexEnum(),'format','Integer')
+		#}}}
+		#if driver is fos_reverse, build index: {{{
+		if m.strcmpi(self.driver,'fos_reverse'):
+			index=0
+
+			for dep in self.dependents:
+				if not numpy.isnan(dep.fos_reverse_index):
+					index+=dep.fos_reverse_index
+					break
+				else:
+					if m.strcmpi(dep.type,'scalar'):
+						index+=1
+					else:
+						index+=dep.nods
+
+			index-=1    #get c-index numbering going
+			WriteData(fid,'data',index,'enum',AutodiffFosReverseIndexEnum(),'format','Integer')
+		#}}}
+		#if driver is fov_forward, build indices: {{{
+		if m.strcmpi(self.driver,'fov_forward'):
+			indices=0
+
+			for indep in self.independents:
+				if indep.fos_forward_index:
+					indices+=indep.fov_forward_indices
+					break
+				else:
+					if m.strcmpi(indep.type,'scalar'):
+						indices+=1
+					else:
+						indices+=indep.nods
+
+			indices-=1    #get c-indices numbering going
+			WriteData(fid,'data',indices,'enum',AutodiffFovForwardIndicesEnum(),'format','IntMat','mattype',3)
+		#}}}
+		#deal with mass fluxes: {{{
+		mass_flux_segments=[dep.segments for dep in self.dependents if m.strcmpi(dep.name,'MassFlux')]
+
+		if mass_flux_segments:
+			WriteData(fid,'data',mass_flux_segments,'enum',MassFluxSegmentsEnum(),'format','MatArray')
+			flag=True
+		else:
+			flag=False
+		WriteData(fid,'data',flag,'enum',AutodiffMassFluxSegmentsPresentEnum(),'format','Boolean')
+		#}}}
+		#deal with trace keep on: {{{
+		keep=False
+
+		#From the ADOLC user documentation:
+		#    The optional integer argument keep of trace_on determines whether the numerical values of all active variables are
+		#    recorded in a buffered temporary array or file called the taylor stack. This option takes effect if keep = 1 and
+		#    prepares the scene for an immediately following gradient evaluation by a call to a routine implementing the reverse
+		#    mode as described in the Section 4 and Section 5.
+		#
+
+		if len(self.driver)<=3:
+			keep=False    #there is no "_reverse" string within the driver string
+		else:
+			if m.strncmpi(self.driver[3:],'_reverse',8):
+				keep=True
+			else:
+				keep=False
+		WriteData(fid,'data',keep,'enum',AutodiffKeepEnum(),'format','Boolean')
+		#}}}
+
+		return
+	# }}}
Index: ../trunk-jpl/src/py3/classes/balancethickness.py
===================================================================
--- ../trunk-jpl/src/py3/classes/balancethickness.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/balancethickness.py	(revision 19895)
@@ -0,0 +1,57 @@
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from checkfield import checkfield
+from WriteData import WriteData
+
+class balancethickness(object):
+	"""
+	BALANCETHICKNESS class definition
+
+	Usage:
+		balancethickness=balancethickness();
+	"""
+
+	def __init__(self): # {{{
+		self.spcthickness    = float('NaN')
+		self.thickening_rate = float('NaN')
+		self.stabilization   = 0
+
+		#set defaults
+		self.setdefaultparameters()
+
+	#}}}
+	def __repr__(self): # {{{
+
+		string='   balance thickness solution parameters:'
+
+		string="%s\n%s"%(string,fielddisplay(self,'spcthickness','thickness constraints (NaN means no constraint) [m]'))
+		string="%s\n%s"%(string,fielddisplay(self,'thickening_rate','ice thickening rate used in the mass conservation (dh/dt) [m/yr]'))
+		string="%s\n%s"%(string,fielddisplay(self,'stabilization',"0: None, 1: SU, 2: SSA's artificial diffusivity, 3: DG"))
+		return string
+	#}}}
+	def setdefaultparameters(self): # {{{
+
+		#Type of stabilization used
+		self.stabilization=1
+
+		return self
+	#}}}
+	def checkconsistency(self,md,solution,analyses): # {{{
+		#Early return
+		if not solution==BalancethicknessSolutionEnum():
+			return md
+
+		md = checkfield(md,'fieldname','balancethickness.spcthickness')
+		md = checkfield(md,'fieldname','balancethickness.thickening_rate','size',[md.mesh.numberofvertices],'NaN',1)
+		md = checkfield(md,'fieldname','balancethickness.stabilization','size',[1],'values',[0,1,2,3])
+
+		return md
+	# }}}
+	def marshall(self,md,fid): # {{{
+
+		yts=365.0*24.0*3600.0
+
+		WriteData(fid,'object',self,'fieldname','spcthickness','format','DoubleMat','mattype',1)
+		WriteData(fid,'object',self,'fieldname','thickening_rate','format','DoubleMat','mattype',1,'scale',1./yts)
+		WriteData(fid,'object',self,'fieldname','stabilization','format','Integer')
+	# }}}
Index: ../trunk-jpl/src/py3/classes/matdamageice.py
===================================================================
--- ../trunk-jpl/src/py3/classes/matdamageice.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/matdamageice.py	(revision 19895)
@@ -0,0 +1,156 @@
+from fielddisplay import fielddisplay
+from project3d import project3d
+from EnumDefinitions import MaterialsEnum, MatdamageiceEnum, MaterialsRheologyLawEnum, MaterialsRhoSeawaterEnum
+from StringToEnum import StringToEnum
+from checkfield import checkfield
+from WriteData import WriteData
+
+class matdamageice(object):
+	"""
+	MATDAMAGEICE class definition
+
+	Usage:
+		matdamageice=matdamageice();
+	"""
+
+	def __init__(self): # {{{
+		self.rho_ice                   = 0.
+		self.rho_water                 = 0.
+		self.rho_freshwater            = 0.
+		self.mu_water                  = 0.
+		self.heatcapacity              = 0.
+		self.latentheat                = 0.
+		self.thermalconductivity       = 0.
+		self.temperateiceconductivity  = 0.
+		self.meltingpoint              = 0.
+		self.beta                      = 0.
+		self.mixed_layer_capacity      = 0.
+		self.thermal_exchange_velocity = 0.
+		self.rheology_B                = float('NaN')
+		self.rheology_n                = float('NaN')
+		self.rheology_law              = ''
+
+		#gia:
+		self.lithosphere_shear_modulus = 0.
+		self.lithosphere_density       = 0.
+		self.mantle_shear_modulus      = 0.
+		self.mantle_density            = 0.
+
+		self.setdefaultparameters()
+	#}}}
+	def __repr__(self): # {{{
+		string="   Materials:"
+
+		string="%s\n%s"%(string,fielddisplay(self,"rho_ice","ice density [kg/m^3]"))
+		string="%s\n%s"%(string,fielddisplay(self,"rho_water","water density [kg/m^3]"))
+		string="%s\n%s"%(string,fielddisplay(self,"rho_freshwater","fresh water density [kg/m^3]"))
+		string="%s\n%s"%(string,fielddisplay(self,"mu_water","water viscosity [N s/m^2]"))
+		string="%s\n%s"%(string,fielddisplay(self,"heatcapacity","heat capacity [J/kg/K]"))
+		string="%s\n%s"%(string,fielddisplay(self,"thermalconductivity","ice thermal conductivity [W/m/K]"))
+		string="%s\n%s"%(string,fielddisplay(self,"temperateiceconductivity","temperate ice thermal conductivity [W/m/K]"))
+		string="%s\n%s"%(string,fielddisplay(self,"meltingpoint","melting point of ice at 1 atm in K"))
+		string="%s\n%s"%(string,fielddisplay(self,"latentheat","latent heat of fusion [J/kg]"))
+		string="%s\n%s"%(string,fielddisplay(self,"beta","rate of change of melting point with pressure [K/Pa]"))
+		string="%s\n%s"%(string,fielddisplay(self,"mixed_layer_capacity","mixed layer capacity [W/kg/K]"))
+		string="%s\n%s"%(string,fielddisplay(self,"thermal_exchange_velocity","thermal exchange velocity [m/s]"))
+		string="%s\n%s"%(string,fielddisplay(self,"rheology_B","flow law parameter [Pa s^(1/n)]"))
+		string="%s\n%s"%(string,fielddisplay(self,"rheology_n","Glen's flow law exponent"))
+		string="%s\n%s"%(string,fielddisplay(self,"rheology_law","law for the temperature dependence of the rheology: 'None', 'Cuffey', 'Paterson', 'Arrhenius' or 'LliboutryDuval'"))
+		string="%s\n%s"%(string,fielddisplay(self,"lithosphere_shear_modulus","Lithosphere shear modulus [Pa]"))
+		string="%s\n%s"%(string,fielddisplay(self,"lithosphere_density","Lithosphere density [g/cm^3]"))
+		string="%s\n%s"%(string,fielddisplay(self,"mantle_shear_modulus","Mantle shear modulus [Pa]"))
+		string="%s\n%s"%(string,fielddisplay(self,"mantle_density","Mantle density [g/cm^3]"))
+
+		return string
+	#}}}
+	def extrude(self,md): # {{{
+		self.rheology_B=project3d(md,'vector',self.rheology_B,'type','node')
+		self.rheology_n=project3d(md,'vector',self.rheology_n,'type','element')
+		return self
+	#}}}
+	def setdefaultparameters(self): # {{{
+		#ice density (kg/m^3)
+		self.rho_ice=917.
+
+		#ocean water density (kg/m^3)
+		self.rho_water=1023.
+
+		#fresh water density (kg/m^3)
+		self.rho_freshwater=1000.
+
+		#water viscosity (N.s/m^2)
+		self.mu_water=0.001787
+
+		#ice heat capacity cp (J/kg/K)
+		self.heatcapacity=2093.
+
+		#ice latent heat of fusion L (J/kg)
+		self.latentheat=3.34*10**5
+
+		#ice thermal conductivity (W/m/K)
+		self.thermalconductivity=2.4
+
+		#temperate ice thermal conductivity (W/m/K)
+		self.temperateiceconductivity=0.24
+
+		#the melting point of ice at 1 atmosphere of pressure in K
+		self.meltingpoint=273.15
+
+		#rate of change of melting point with pressure (K/Pa)
+		self.beta=9.8*10**-8
+
+		#mixed layer (ice-water interface) heat capacity (J/kg/K)
+		self.mixed_layer_capacity=3974.
+
+		#thermal exchange velocity (ice-water interface) (m/s)
+		self.thermal_exchange_velocity=1.00*10**-4
+
+		#rheology law: temperature dependence of B with T
+		#available: None, Cuffey, Paterson, Arrhenius and LliboutryDuval
+		self.rheology_law='Paterson'
+
+		# GIA:
+		self.lithosphere_shear_modulus = 6.7*10**10  # (Pa)
+		self.lithosphere_density       = 3.32        # (g/cm^3)
+		self.mantle_shear_modulus      = 1.45*10**11 # (Pa)
+		self.mantle_density            = 3.34        # (g/cm^3)
+
+		return self
+	#}}}
+	def checkconsistency(self,md,solution,analyses): # {{{
+		md = checkfield(md,'fieldname','materials.rho_ice','>',0)
+		md = checkfield(md,'fieldname','materials.rho_water','>',0)
+		md = checkfield(md,'fieldname','materials.rho_freshwater','>',0)
+		md = checkfield(md,'fieldname','materials.mu_water','>',0)
+		md = checkfield(md,'fieldname','materials.rheology_B','>',0,'size',[md.mesh.numberofvertices])
+		md = checkfield(md,'fieldname','materials.rheology_n','>',0,'size',[md.mesh.numberofelements])
+		md = checkfield(md,'fieldname','materials.rheology_law','values',['None','Cuffey','Paterson','Arrhenius','LliboutryDuval'])
+		md = checkfield(md,'fieldname','materials.lithosphere_shear_modulus','>',0,'numel',[1]);
+		md = checkfield(md,'fieldname','materials.lithosphere_density','>',0,'numel',[1]);
+		md = checkfield(md,'fieldname','materials.mantle_shear_modulus','>',0,'numel',[1]);
+		md = checkfield(md,'fieldname','materials.mantle_density','>',0,'numel',[1]);
+		return md
+	# }}}
+	def marshall(self,md,fid): # {{{
+		WriteData(fid,'enum',MaterialsEnum(),'data',MatdamageiceEnum(),'format','Integer');
+		WriteData(fid,'object',self,'class','materials','fieldname','rho_ice','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','rho_water','enum',MaterialsRhoSeawaterEnum(),'format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','rho_freshwater','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','mu_water','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','heatcapacity','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','latentheat','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','thermalconductivity','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','temperateiceconductivity','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','meltingpoint','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','beta','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','mixed_layer_capacity','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','thermal_exchange_velocity','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','rheology_B','format','DoubleMat','mattype',1)
+		WriteData(fid,'object',self,'class','materials','fieldname','rheology_n','format','DoubleMat','mattype',2)
+		WriteData(fid,'data',StringToEnum(self.rheology_law)[0],'enum',MaterialsRheologyLawEnum(),'format','Integer')
+
+		WriteData(fid,'object',self,'class','materials','fieldname','lithosphere_shear_modulus','format','Double');
+		WriteData(fid,'object',self,'class','materials','fieldname','lithosphere_density','format','Double','scale',10.**3.);
+		WriteData(fid,'object',self,'class','materials','fieldname','mantle_shear_modulus','format','Double');
+		WriteData(fid,'object',self,'class','materials','fieldname','mantle_density','format','Double','scale',10.**3.);
+	# }}}
Index: ../trunk-jpl/src/py3/classes/groundingline.py
===================================================================
--- ../trunk-jpl/src/py3/classes/groundingline.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/groundingline.py	(revision 19895)
@@ -0,0 +1,54 @@
+import numpy
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from StringToEnum import StringToEnum
+from checkfield import checkfield
+from WriteData import WriteData
+import MatlabFuncs as m
+
+class groundingline(object):
+	"""
+	GROUNDINGLINE class definition
+
+	Usage:
+		groundingline=groundingline();
+	"""
+
+	def __init__(self): # {{{
+		self.migration=''
+
+		#set defaults
+		self.setdefaultparameters()
+
+	#}}}
+	def __repr__(self): # {{{
+		string='   grounding line migration parameters:'
+
+		string="%s\n%s"%(string,fielddisplay(self,'migration',"type of grounding line migration: 'SoftMigration','AggressiveMigration','SubelementMigration','SubelementMigration2','Contact','None'"))
+		return string
+	#}}}
+	def setdefaultparameters(self): # {{{
+
+		#Type of migration
+		self.migration='None'
+
+		return self
+	#}}}
+	def checkconsistency(self,md,solution,analyses): # {{{
+
+		md = checkfield(md,fieldname='groundingline.migration',values=['None','AggressiveMigration','SoftMigration','SubelementMigration','SubelementMigration2','Contact','GroundingOnly'])
+
+		if not m.strcmp(self.migration,'None'):
+			if numpy.any(numpy.isnan(md.geometry.bed)):
+				md.checkmessage("requesting grounding line migration, but bathymetry is absent!")
+			pos=numpy.nonzero(md.mask.groundedice_levelset>0.)[0]
+			if any(numpy.abs(md.geometry.base[pos]-md.geometry.bed[pos])>10**-10):
+				md.checkmessage("base not equal to bed on grounded ice!")
+			if any(md.geometry.bed - md.geometry.base > 10**-9):
+				md.checkmessage("bed above base on floating ice!")
+
+		return md
+	# }}}
+	def marshall(self,md,fid): # {{{
+		WriteData(fid,'data',StringToEnum(self.migration)[0],'enum',GroundinglineMigrationEnum(),'format','Integer')
+	# }}}
Index: ../trunk-jpl/src/py3/classes/verbose.py
===================================================================
--- ../trunk-jpl/src/py3/classes/verbose.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/verbose.py	(revision 19895)
@@ -0,0 +1,138 @@
+from pairoptions import pairoptions
+import MatlabFuncs as m
+from EnumDefinitions import *
+from WriteData import WriteData
+
+class verbose(object):
+	"""
+	VERBOSE class definition
+
+	Available verbosity levels:
+		mprocessor  : model processing
+		module      : modules
+		solution    : solution sequence
+		solver      : solver info (extensive)
+		convergence : convergence criteria
+		control     : control method
+		qmu         : sensitivity analysis
+		autodiff    : AD analysis
+		smb         : SMB analysis
+
+	Usage:
+		verbose=verbose();
+		verbose=verbose(3);
+		verbose=verbose('001100');
+		verbose=verbose(module=True,solver=False);
+
+	WARNING: some parts of this file are synchronized with src/c/shared/Numerics/Verbosity.h
+	         Do not modify these sections. See src/c/shared/Numerics/README for more info
+	"""
+
+	def __init__(self,*args,**kwargs): # {{{
+		#BEGINFIELDS
+		self.mprocessor  = False
+		self.module      = False
+		self.solution    = False
+		self.solver      = False
+		self.convergence = False
+		self.control     = False
+		self.qmu         = False
+		self.autodiff    = False
+		self.smb         = False
+		#ENDFIELDS
+
+		if not kwargs and not args:
+			#no arguments: turn on the default levels (solution, qmu and control)
+			self.solution=True
+			self.qmu=True
+			self.control=True
+
+		elif len(args) == 1:
+			binary=args[0]
+			if isinstance(binary,str):
+				if binary.lower()=='all':
+					binary=2**11-1    #all ones
+					self.BinaryToVerbose(binary)
+					self.solver=False    #Do not use by default
+				else:
+					binary=int(binary,2)
+					self.BinaryToVerbose(binary)
+			elif isinstance(binary,(int,float)):
+				self.BinaryToVerbose(int(binary))
+
+		else:
+			#Use options to initialize object
+			self=pairoptions(**kwargs).AssignObjectFields(self)
+
+			#Cast to logicals
+			listproperties=vars(self)
+			for fieldname,fieldvalue in list(listproperties.items()):
+				if isinstance(fieldvalue,bool) or isinstance(fieldvalue,(int,float)):
+					setattr(self,fieldname,bool(fieldvalue))
+				else:
+					raise TypeError("verbose supported field values are logicals only (True or False)")
+	# }}}
+	def __repr__(self): # {{{
+
+		#BEGINDISP
+		s ="class '%s' = \n" % type(self)
+		s+="   %15s : %s\n" % ('mprocessor',self.mprocessor)
+		s+="   %15s : %s\n" % ('module',self.module)
+		s+="   %15s : %s\n" % ('solution',self.solution)
+		s+="   %15s : %s\n" % ('solver',self.solver)
+		s+="   %15s : %s\n" % ('convergence',self.convergence)
+		s+="   %15s : %s\n" % ('control',self.control)
+		s+="   %15s : %s\n" % ('qmu',self.qmu)
+		s+="   %15s : %s\n" % ('autodiff',self.autodiff)
+		s+="   %15s : %s\n" % ('smb',self.smb)
+		#ENDDISP
+
+		return s
+	# }}}
+	def VerboseToBinary(self): # {{{
+
+		#BEGINVERB2BIN
+		binary=0
+		if self.mprocessor:
+			binary=binary | 1
+		if self.module:
+			binary=binary | 2
+		if self.solution:
+			binary=binary | 4
+		if self.solver:
+			binary=binary | 8
+		if self.convergence:
+			binary=binary | 16
+		if self.control:
+			binary=binary | 32
+		if self.qmu:
+			binary=binary | 64
+		if self.autodiff:
+			binary=binary | 128
+		if self.smb:
+			binary=binary | 256
+		#ENDVERB2BIN
+
+		return binary
+	# }}}
+	def BinaryToVerbose(self,binary): # {{{
+
+		#BEGINBIN2VERB
+		self.mprocessor =bool(binary & 1)
+		self.module     =bool(binary & 2)
+		self.solution   =bool(binary & 4)
+		self.solver     =bool(binary & 8)
+		self.convergence=bool(binary & 16)
+		self.control    =bool(binary & 32)
+		self.qmu        =bool(binary & 64)
+		self.autodiff   =bool(binary & 128)
+		self.smb        =bool(binary & 256)
+		#ENDBIN2VERB
+	# }}}
+	def checkconsistency(self,md,solution,analyses): # {{{
+		return md
+	# }}}
+	def marshall(self,md,fid): # {{{
+		WriteData(fid,'data',self.VerboseToBinary(),'enum',VerboseEnum(),'format','Integer')
+	# }}}
Index: ../trunk-jpl/src/py3/classes/SMBd18opdd.py
===================================================================
--- ../trunk-jpl/src/py3/classes/SMBd18opdd.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/SMBd18opdd.py	(revision 19895)
@@ -0,0 +1,138 @@
+import numpy
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from checkfield import checkfield
+from WriteData import WriteData
+from project3d import project3d
+
+class SMBd18opdd(object):
+	"""
+	SMBd18opdd Class definition
+
+	Usage:
+		SMBd18opdd=SMBd18opdd();
+	"""
+
+	def __init__(self): # {{{
+		self.desfac   = 0.
+		self.s0p      = float('NaN')
+		self.s0t      = float('NaN')
+		self.rlaps    = 0.
+		self.rlapslgm = 0.
+		self.dpermil  = 0.
+ self.Tdiff = float('NaN') + self.sealev = float('NaN') + self.ismungsm = 0 + self.isd18opd = 0 + self.delta18o = float('NaN') + self.delta18o_surface = float('NaN') + self.temperatures_presentday = float('NaN') + self.precipitations_presentday = float('NaN') + + #set defaults + self.setdefaultparameters() + self.requested_outputs = [] + #}}} + def __repr__(self): # {{{ + string=" surface forcings parameters:" + + string="%s\n%s"%(string,fielddisplay(self,'isd18opd','is delta18o parametrisation from present day temperature and precipitation activated (0 or 1, default is 0)')) + string="%s\n%s"%(string,fielddisplay(self,'desfac','desertification elevation factor (between 0 and 1, default is 0.5) [m]')) + string="%s\n%s"%(string,fielddisplay(self,'s0p','should be set to elevation from precip source (between 0 and a few 1000s m, default is 0) [m]')) + string="%s\n%s"%(string,fielddisplay(self,'s0t','should be set to elevation from temperature source (between 0 and a few 1000s m, default is 0) [m]')) + string="%s\n%s"%(string,fielddisplay(self,'rlaps','present day lapse rate [degree/km]')) + if self.isd18opd: + string="%s\n%s"%(string,fielddisplay(self,'temperatures_presentday','monthly present day surface temperatures [K], required if delta18o/mungsm is activated')) + string="%s\n%s"%(string,fielddisplay(self,'precipitations_presentday','monthly surface precipitation [m/yr water eq], required if delta18o or mungsm is activated')) + string="%s\n%s"%(string,fielddisplay(self,'delta18o','delta18o [per mil], required if pdd is activated and delta18o activated')) + string="%s\n%s"%(string,fielddisplay(self,'dpermil','degree per mil, required if d18opd is activated')) + string="%s\n%s"%(string,fielddisplay(self,'requested_outputs','additional outputs requested')) + + return string + #}}} + def extrude(self,md): # {{{ + + if self.isd18opd: self.temperatures_presentday=project3d(md,'vector',self.temperatures_presentday,'type','node') + if self.isd18opd: self.precipitations_presentday=project3d(md,'vector',self.precipitations_presentday,'type','node') + self.s0p=project3d(md,'vector',self.s0p,'type','node') + self.s0t=project3d(md,'vector',self.s0t,'type','node') + + return self + #}}} + def defaultoutputs(self,md): # {{{ + return [] + #}}} + def initialize(self,md): # {{{ + + if numpy.all(numpy.isnan(self.s0p)): + self.s0p=numpy.zeros((md.mesh.numberofvertices,1)) + print(" no SMBd18opdd.s0p specified: values set as zero") + + if numpy.all(numpy.isnan(self.s0t)): + self.s0t=numpy.zeros((md.mesh.numberofvertices,1)) + print(" no SMBd18opdd.s0t specified: values set as zero") + + return self + # }}} + def setdefaultparameters(self): # {{{ + + #pdd method not used in default mode + self.ismungsm = 0 + self.isd18opd = 1 + self.desfac = 0.5 + self.rlaps = 6.5 + self.rlapslgm = 6.5 + self.dpermil = 2.4 + + return self + #}}} + def checkconsistency(self,md,solution,analyses): # {{{ + + if MasstransportAnalysisEnum() in analyses: + md = checkfield(md,'fieldname','smb.desfac','<=',1,'numel',[1]) + md = checkfield(md,'fieldname','smb.s0p','>=',0,'NaN',1,'size',[md.mesh.numberofvertices,1]) + md = checkfield(md,'fieldname','smb.s0t','>=',0,'NaN',1,'size',[md.mesh.numberofvertices,1]) + md = checkfield(md,'fieldname','smb.rlaps','>=',0,'numel',[1]) + md = checkfield(md,'fieldname','smb.rlapslgm','>=',0,'numel',[1]) + + if self.isd18opd: + md = checkfield(md,'fieldname','smb.temperatures_presentday','size',[md.mesh.numberofvertices+1,12],'NaN',1,'timeseries',1) + md = 
checkfield(md,'fieldname','smb.precipitations_presentday','size',[md.mesh.numberofvertices+1,12],'NaN',1,'timeseries',1) + md = checkfield(md,'fieldname','smb.delta18o','NaN',1,'size',[2,numpy.nan],'singletimeseries',1) + md = checkfield(md,'fieldname','smb.dpermil','>=',0,'numel',[1]) + + md = checkfield(md,'fieldname','masstransport.requested_outputs','stringrow',1) + + return md + # }}} + def marshall(self,md,fid): # {{{ + + yts=365.0*24.0*3600.0 + + WriteData(fid,'enum',SmbEnum(),'data',SMBd18opddEnum(),'format','Integer') + + WriteData(fid,'object',self,'class','smb','fieldname','ismungsm','format','Boolean') + WriteData(fid,'object',self,'class','smb','fieldname','isd18opd','format','Boolean') + WriteData(fid,'object',self,'class','smb','fieldname','desfac','format','Double') + WriteData(fid,'object',self,'class','smb','fieldname','s0p','format','DoubleMat','mattype',1); + WriteData(fid,'object',self,'class','smb','fieldname','s0t','format','DoubleMat','mattype',1); + WriteData(fid,'object',self,'class','smb','fieldname','rlaps','format','Double') + WriteData(fid,'object',self,'class','smb','fieldname','rlapslgm','format','Double') + WriteData(fid,'object',self,'class','smb','fieldname','Tdiff','format','DoubleMat','mattype',1,'timeserieslength',2) + WriteData(fid,'object',self,'class','smb','fieldname','sealev','format','DoubleMat','mattype',1,'timeserieslength',2) + + if self.isd18opd: + WriteData(fid,'object',self,'class','smb','fieldname','temperatures_presentday','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1) + WriteData(fid,'object',self,'class','smb','fieldname','precipitations_presentday','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1) + WriteData(fid,'object',self,'class','smb','fieldname','delta18o','format','DoubleMat','mattype',1,'timeserieslength',2) + WriteData(fid,'object',self,'class','smb','fieldname','dpermil','format','Double') + + #process requested outputs + outputs = self.requested_outputs + indices = [i for i, x in enumerate(outputs) if x == 'default'] + if len(indices) > 0: + outputscopy=outputs[0:max(0,indices[0]-1)]+self.defaultoutputs(md)+outputs[indices[0]+1:] + outputs =outputscopy + WriteData(fid,'data',outputs,'enum',SmbRequestedOutputsEnum(),'format','StringArray') + + # }}} Index: ../trunk-jpl/src/py3/classes/transient.py =================================================================== --- ../trunk-jpl/src/py3/classes/transient.py (revision 0) +++ ../trunk-jpl/src/py3/classes/transient.py (revision 19895) @@ -0,0 +1,129 @@ +from fielddisplay import fielddisplay +from EnumDefinitions import * +from checkfield import checkfield +from WriteData import WriteData + +class transient(object): + """ + TRANSIENT class definition + + Usage: + transient=transient(); + """ + + def __init__(self): # {{{ + self.issmb = False + self.ismasstransport = False + self.isstressbalance = False + self.isthermal = False + self.isgroundingline = False + self.isgia = False + self.isdamageevolution = False + self.islevelset = False + self.iscalving = False + self.ishydrology = False + self.requested_outputs = [] + + #set defaults + self.setdefaultparameters() + + #}}} + def __repr__(self): # {{{ + string=' transient solution parameters:' + string="%s\n%s"%(string,fielddisplay(self,'issmb','indicates if a surface mass balance solution is used in the transient')) + string="%s\n%s"%(string,fielddisplay(self,'ismasstransport','indicates if a masstransport solution is used in the transient')) + 
string="%s\n%s"%(string,fielddisplay(self,'isstressbalance','indicates if a stressbalance solution is used in the transient')) + string="%s\n%s"%(string,fielddisplay(self,'isthermal','indicates if a thermal solution is used in the transient')) + string="%s\n%s"%(string,fielddisplay(self,'isgroundingline','indicates if a groundingline migration is used in the transient')) + string="%s\n%s"%(string,fielddisplay(self,'isgia','indicates if a postglacial rebound is used in the transient')) + string="%s\n%s"%(string,fielddisplay(self,'isdamageevolution','indicates whether damage evolution is used in the transient')) + string="%s\n%s"%(string,fielddisplay(self,'islevelset','LEVELSET METHOD DESCRIPTION')) + string="%s\n%s"%(string,fielddisplay(self,'iscalving','indicates whether calving is used in the transient')) + string="%s\n%s"%(string,fielddisplay(self,'ishydrology','indicates whether an hydrology model is used')) + string="%s\n%s"%(string,fielddisplay(self,'requested_outputs','list of additional outputs requested')) + return string + #}}} + def defaultoutputs(self,md): # {{{ + + if self.issmb: + return ['SmbMassBalance'] + else: + return [] + + #}}} + def setallnullparameters(self): # {{{ + + #Nothing done + self.issmb = False + self.ismasstransport = False + self.isstressbalance = False + self.isthermal = False + self.isgroundingline = False + self.isgia = False + self.isdamageevolution = False + self.islevelset = False + self.iscalving = False + self.ishydrology = False + + #default output + self.requested_outputs=[] + return self + #}}} + def setdefaultparameters(self): # {{{ + + #full analysis: Stressbalance, Masstransport and Thermal but no groundingline migration for now + self.issmb = True + self.ismasstransport = True + self.isstressbalance = True + self.isthermal = True + self.isgroundingline = False + self.isgia = False + self.isdamageevolution = False + self.islevelset = False + self.iscalving = False + self.ishydrology = False + + #default output + self.requested_outputs=['default'] + return self + #}}} + def checkconsistency(self,md,solution,analyses): # {{{ + + #Early return + if not solution==TransientSolutionEnum(): + return md + + md = checkfield(md,'fieldname','transient.issmb','numel',[1],'values',[0,1]) + md = checkfield(md,'fieldname','transient.ismasstransport','numel',[1],'values',[0,1]) + md = checkfield(md,'fieldname','transient.isstressbalance','numel',[1],'values',[0,1]) + md = checkfield(md,'fieldname','transient.isthermal','numel',[1],'values',[0,1]) + md = checkfield(md,'fieldname','transient.isgroundingline','numel',[1],'values',[0,1]) + md = checkfield(md,'fieldname','transient.isgia','numel',[1],'values',[0,1]) + md = checkfield(md,'fieldname','transient.isdamageevolution','numel',[1],'values',[0,1]) + md = checkfield(md,'fieldname','transient.islevelset','numel',[1],'values',[0,1]) + md = checkfield(md,'fieldname','transient.ishydrology','numel',[1],'values',[0,1]) + md = checkfield(md,'fieldname','transient.iscalving','numel',[1],'values',[0,1]); + md = checkfield(md,'fieldname','transient.requested_outputs','stringrow',1) + + return md + # }}} + def marshall(self,md,fid): # {{{ + WriteData(fid,'object',self,'fieldname','issmb','format','Boolean') + WriteData(fid,'object',self,'fieldname','ismasstransport','format','Boolean') + WriteData(fid,'object',self,'fieldname','isstressbalance','format','Boolean') + WriteData(fid,'object',self,'fieldname','isthermal','format','Boolean') + WriteData(fid,'object',self,'fieldname','isgroundingline','format','Boolean') + 
	WriteData(fid,'object',self,'fieldname','isgia','format','Boolean')
+		WriteData(fid,'object',self,'fieldname','isdamageevolution','format','Boolean')
+		WriteData(fid,'object',self,'fieldname','islevelset','format','Boolean')
+		WriteData(fid,'object',self,'fieldname','ishydrology','format','Boolean')
+		WriteData(fid,'object',self,'fieldname','iscalving','format','Boolean')
+
+		#process requested outputs
+		outputs = self.requested_outputs
+		indices = [i for i, x in enumerate(outputs) if x == 'default']
+		if len(indices) > 0:
+			outputscopy=outputs[0:max(0,indices[0]-1)]+self.defaultoutputs(md)+outputs[indices[0]+1:]
+			outputs    =outputscopy
+		WriteData(fid,'data',outputs,'enum',TransientRequestedOutputsEnum(),'format','StringArray')
+	# }}}
Index: ../trunk-jpl/src/py3/classes/pairoptions.py
===================================================================
--- ../trunk-jpl/src/py3/classes/pairoptions.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/pairoptions.py	(revision 19895)
@@ -0,0 +1,174 @@
+from collections import OrderedDict
+from WriteData import WriteData
+
+class pairoptions(object):
+	"""
+	PAIROPTIONS class definition
+
+	Usage:
+		pairoptions=pairoptions();
+		pairoptions=pairoptions(module=True,solver=False);
+	"""
+
+	def __init__(self,**kwargs): # {{{
+		self.functionname = ''
+		self.list         = OrderedDict()
+
+		#get calling function name
+		import inspect
+		if len(inspect.stack()) > 1:
+			self.functionname=inspect.stack()[1][3]
+
+		#initialize list
+		if not len(kwargs):
+			pass    #Do nothing
+		else:
+			self.buildlist(**kwargs)
+	# }}}
+	def __repr__(self): # {{{
+		s="   functionname: '%s'\n" % self.functionname
+		if self.list:
+			s+="   list: (%ix%i)\n\n" % (len(self.list),2)
+			for item in list(self.list.items()):
+				if   isinstance(item[1],str):
+					s+="     field: %-10s value: '%s'\n" % (item[0],item[1])
+				elif isinstance(item[1],(bool,int,float)):
+					s+="     field: %-10s value: %g\n" % (item[0],item[1])
+				else:
+					s+="     field: %-10s value: %s\n" % (item[0],type(item[1]))
+		else:
+			s+="   list: empty\n"
+		return s
+	# }}}
+	def buildlist(self,**kwargs): # {{{
+		"""BUILDLIST - build list of objects from input"""
+		# #check length of input
+		# if len(arg) % 2:
+		#	raise TypeError('Invalid parameter/value pair arguments')
+		# numoptions = len(arg)/2
+
+		# #go through arg and build list of objects
+		# for i in xrange(numoptions):
+		#	if isinstance(arg[2*i],(str,unicode)):
+		#		self.list[arg[2*i]] = arg[2*i+1];
+		#	else:
+		#		#option is not a string, ignore it
+		#		print "WARNING: option number %d is not a string and will be ignored." % (i+1)
+
+		#go through kwargs and build list of objects
+		for name,value in kwargs.items():
+			self.list[name] = value
+	# }}}
+	def addfield(self,field,value): # {{{
+		"""ADDFIELD - add a field to an options list"""
+		if isinstance(field,str):
+			if field in self.list:
+				print(("WARNING: field '%s' with value=%s exists and will be overwritten with value=%s." % (field,str(self.list[field]),str(value))))
+			self.list[field] = value
+	# }}}
+	def addfielddefault(self,field,value): # {{{
+		"""ADDFIELDDEFAULT - add a field to an options list if it does not already exist"""
+		if isinstance(field,str):
+			if field not in self.list:
+				self.list[field] = value
+	# }}}
+	def AssignObjectFields(self,obj2): # {{{
+		"""ASSIGNOBJECTFIELDS - assign object fields from options"""
+		for item in list(self.list.items()):
+			if item[0] in dir(obj2):
+				setattr(obj2,item[0],item[1])
+			else:
+				print(("WARNING: field '%s' is not a property of '%s'." % (item[0],type(obj2))))
		return obj2
+	# }}}
+	def changefieldvalue(self,field,newvalue): # {{{
+		"""CHANGEFIELDVALUE - change the value of a field in an options list"""
+
+		self.list[field]=newvalue;
+	# }}}
+	def exist(self,field): # {{{
+		"""EXIST - check if a field exists"""
+
+		#some argument checking:
+		if field is None or field == '':
+			raise ValueError('exist error message: bad usage');
+		if not isinstance(field,str):
+			raise TypeError("exist error message: field '%s' should be a string." % str(field));
+
+		#Recover option
+		if field in self.list:
+			return True
+		else:
+			return False
+	# }}}
+	def getfieldvalue(self,field,default=None): # {{{
+		"""
+		GETFIELDVALUE - get the value of an option
+
+		Usage:
+			value=options.getfieldvalue(field,default)
+
+		Find an option value from a field name. A default value
+		can be given in input if the field does not exist
+
+		Examples:
+			value=options.getfieldvalue('caxis')
+			value=options.getfieldvalue('caxis',[0,2])
+		"""
+
+		#some argument checking:
+		if field is None or field == '':
+			raise ValueError('getfieldvalue error message: bad usage');
+		if not isinstance(field,str):
+			raise TypeError("getfieldvalue error message: field '%s' should be a string." % str(field));
+
+		#Recover option
+		if field in self.list:
+			value=self.list[field]
+		else:
+			if default is not None:
+				value=default
+			else:
+				raise KeyError("error message: field '%s' has not been provided by user (and no default value has been specified)." % field)
+
+		return value
+	# }}}
+	def removefield(self,field,warn): # {{{
+		"""
+		REMOVEFIELD - delete a field in an option list
+
+		Usage:
+			obj=removefield(self,field,warn)
+
+		if warn==1 display an info message to warn the user that
+		one of their options has been removed.
+		"""
+
+		#check if field exists
+		if field in self.list:
+
+			#remove duplicates from the options list
+			del self.list[field]
+
+			#warn user if requested
+			if warn:
+				print(("removefield info: option '%s' has been removed from the list of options." % field))
+	# }}}
+	def marshall(self,md,fid,firstindex): # {{{
+
+		for i,item in enumerate(self.list.items()):
+			name  = item[0]
+			value = item[1]
+
+			#Write option name
+			WriteData(fid,'enum',(firstindex-1)+2*i+1,'data',name,'format','String')
+
+			#Write option value
+			if   isinstance(value,str):
+				WriteData(fid,'enum',(firstindex-1)+2*i+2,'data',value,'format','String')
+			elif isinstance(value,(bool,int,float)):
+				WriteData(fid,'enum',(firstindex-1)+2*i+2,'data',value,'format','Double')
+			else:
+				raise TypeError("Cannot marshall option '%s': format not supported yet." % name)
+	# }}}
Index: ../trunk-jpl/src/py3/classes/linearbasalforcings.py
===================================================================
--- ../trunk-jpl/src/py3/classes/linearbasalforcings.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/linearbasalforcings.py	(revision 19895)
@@ -0,0 +1,108 @@
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from checkfield import checkfield
+from WriteData import WriteData
+import numpy
+
+class linearbasalforcings(object):
+	"""
+	LINEAR BASAL FORCINGS class definition
+
+	Usage:
+		basalforcings=linearbasalforcings();
+	"""
+
+	def __init__(self,*args): # {{{
+
+		if not len(args):
+			self.groundedice_melting_rate = float('NaN')
+			self.deepwater_melting_rate   = 0.
+			self.deepwater_elevation      = 0.
+			self.upperwater_elevation     = 0.
+			self.geothermalflux           = float('NaN')
+
+			#set defaults
+			self.setdefaultparameters()
+		elif len(args)==1 and args[0].__module__=='basalforcings':
+			print('converting basalforcings to linearbasalforcings')
+			inv=args[0]
+			self.groundedice_melting_rate = inv.groundedice_melting_rate
+			self.geothermalflux           = inv.geothermalflux
+			self.deepwater_melting_rate   = 0.
+			self.deepwater_elevation      = 0.
+			self.upperwater_elevation     = 0.
+
+			#set defaults
+			self.setdefaultparameters()
+		else:
+			raise Exception('constructor not supported')
+
+	#}}}
+	def __repr__(self): # {{{
+		string="   linear basal forcings parameters:"
+
+		string="%s\n%s"%(string,fielddisplay(self,"groundedice_melting_rate","basal melting rate (positive if melting) [m/yr]"))
+		string="%s\n%s"%(string,fielddisplay(self,"deepwater_melting_rate","basal melting rate (positive if melting, applied for floating ice with base < deepwater_elevation) [m/yr]"))
+		string="%s\n%s"%(string,fielddisplay(self,"deepwater_elevation","elevation of ocean deepwater [m]"))
+		string="%s\n%s"%(string,fielddisplay(self,"upperwater_elevation","elevation of ocean upper water [m]"))
+		string="%s\n%s"%(string,fielddisplay(self,"geothermalflux","geothermal heat flux [W/m^2]"))
+		return string
+	#}}}
+	def initialize(self,md): # {{{
+
+		if numpy.all(numpy.isnan(self.groundedice_melting_rate)):
+			self.groundedice_melting_rate=numpy.zeros((md.mesh.numberofvertices,1))
+			print("      no basalforcings.groundedice_melting_rate specified: values set as zero")
+
+		return self
+	#}}}
+	def setdefaultparameters(self): # {{{
+
+		self.deepwater_melting_rate = 50.0
+		self.deepwater_elevation    = -800.0
+		self.upperwater_elevation   = -400.0
+
+		return self
+	#}}}
+	def checkconsistency(self,md,solution,analyses): # {{{
+
+		if MasstransportAnalysisEnum() in analyses and not (solution==TransientSolutionEnum() and not md.transient.ismasstransport):
+			md = checkfield(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'timeseries',1)
+			md = checkfield(md,'fieldname','basalforcings.deepwater_melting_rate','>=',0);
+			md = checkfield(md,'fieldname','basalforcings.deepwater_elevation','<',md.basalforcings.upperwater_elevation);
+			md = checkfield(md,'fieldname','basalforcings.upperwater_elevation','<',0);
+
+		if BalancethicknessAnalysisEnum() in analyses:
+			md = checkfield(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'size',[md.mesh.numberofvertices])
+			md = checkfield(md,'fieldname','basalforcings.deepwater_melting_rate','>=',0);
+			md = checkfield(md,'fieldname','basalforcings.deepwater_elevation','<',md.basalforcings.upperwater_elevation);
+			md = checkfield(md,'fieldname','basalforcings.upperwater_elevation','<',0);
+
+		if ThermalAnalysisEnum() in analyses and not (solution==TransientSolutionEnum() and not md.transient.isthermal):
+			md = checkfield(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'timeseries',1)
+			md = checkfield(md,'fieldname','basalforcings.deepwater_melting_rate','>=',0);
+			md = checkfield(md,'fieldname','basalforcings.deepwater_elevation','<',md.basalforcings.upperwater_elevation);
+			md = checkfield(md,'fieldname','basalforcings.upperwater_elevation','<',0);
+			md = checkfield(md,'fieldname','basalforcings.geothermalflux','NaN',1,'timeseries',1,'>=',0)
+
+		return md
+	# }}}
+	def marshall(self,md,fid): # {{{
+
+		yts=365.0*24.0*3600.0
+
+		floatingice_melting_rate = numpy.zeros((md.mesh.numberofvertices,1))
+		pos=numpy.nonzero(md.geometry.base<=md.basalforcings.deepwater_elevation)
+
floatingice_melting_rate[pos]=md.basalforcings.deepwater_melting_rate + pos=numpy.nonzero(numpy.logical_and(md.geometry.base>md.basalforcings.deepwater_elevation,md.geometry.base=',0,'NaN',1,'size',[md.mesh.numberofvertices,1]) + md = checkfield(md,'fieldname','smb.s0t','>=',0,'NaN',1,'size',[md.mesh.numberofvertices,1]) + md = checkfield(md,'fieldname','smb.rlaps','>=',0,'numel',[1]) + md = checkfield(md,'fieldname','smb.rlapslgm','>=',0,'numel',[1]) + + if (self.isdelta18o==0 and self.ismungsm==0): + md = checkfield(md,'fieldname','smb.monthlytemperatures','NaN',1,'timeseries',1) + md = checkfield(md,'fieldname','smb.precipitation','NaN',1,'timeseries',1) + elif self.isdelta18o: + md = checkfield(md,'fieldname','smb.delta18o','NaN',1,'size',[2,numpy.nan],'singletimeseries',1) + md = checkfield(md,'fieldname','smb.delta18o_surface','NaN',1,'size',[2,numpy.nan],'singletimeseries',1) + md = checkfield(md,'fieldname','smb.temperatures_presentday','size',[md.mesh.numberofvertices+1,12],'NaN',1,'timeseries',1) + md = checkfield(md,'fieldname','smb.temperatures_lgm','size',[md.mesh.numberofvertices+1,12],'NaN',1,'timeseries',1) + md = checkfield(md,'fieldname','smb.precipitations_presentday','size',[md.mesh.numberofvertices+1,12],'NaN',1,'timeseries',1) + md = checkfield(md,'fieldname','smb.precipitations_lgm','size',[md.mesh.numberofvertices+1,12],'NaN',1,'timeseries',1) + md = checkfield(md,'fieldname','smb.Tdiff','NaN',1,'size',[2,numpy.nan],'singletimeseries',1) + md = checkfield(md,'fieldname','smb.sealev','NaN',1,'size',[2,numpy.nan],'singletimeseries',1) + elif self.ismungsm: + md = checkfield(md,'fieldname','smb.temperatures_presentday','size',[md.mesh.numberofvertices+1,12],'NaN',1,'timeseries',1) + md = checkfield(md,'fieldname','smb.temperatures_lgm','size',[md.mesh.numberofvertices+1,12],'NaN',1,'timeseries',1) + md = checkfield(md,'fieldname','smb.precipitations_presentday','size',[md.mesh.numberofvertices+1,12],'NaN',1,'timeseries',1) + md = checkfield(md,'fieldname','smb.precipitations_lgm','size',[md.mesh.numberofvertices+1,12],'NaN',1,'timeseries',1) + md = checkfield(md,'fieldname','smb.Pfac','NaN',1,'size',[2,numpy.nan],'singletimeseries',1) + md = checkfield(md,'fieldname','smb.Tdiff','NaN',1,'size',[2,numpy.nan],'singletimeseries',1) + md = checkfield(md,'fieldname','smb.sealev','NaN',1,'size',[2,numpy.nan],'singletimeseries',1) + + md = checkfield(md,'fieldname','masstransport.requested_outputs','stringrow',1) + return md + #}}} + def marshall(self,md,fid): # {{{ + + yts=365.0*24.0*3600.0 + + WriteData(fid,'enum',SmbEnum(),'data',SMBpddEnum(),'format','Integer') + + WriteData(fid,'object',self,'class','smb','fieldname','isdelta18o','format','Boolean') + WriteData(fid,'object',self,'class','smb','fieldname','ismungsm','format','Boolean') + WriteData(fid,'object',self,'class','smb','fieldname','desfac','format','Double') + WriteData(fid,'object',self,'class','smb','fieldname','s0p','format','DoubleMat','mattype',1); + WriteData(fid,'object',self,'class','smb','fieldname','s0t','format','DoubleMat','mattype',1); + WriteData(fid,'object',self,'class','smb','fieldname','rlaps','format','Double') + WriteData(fid,'object',self,'class','smb','fieldname','rlapslgm','format','Double') + + if (self.isdelta18o==0 and self.ismungsm==0): + WriteData(fid,'object',self,'class','smb','fieldname','monthlytemperatures','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1) + 
WriteData(fid,'object',self,'class','smb','fieldname','precipitation','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1) + elif self.isdelta18o: + WriteData(fid,'object',self,'class','smb','fieldname','temperatures_presentday','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1) + WriteData(fid,'object',self,'class','smb','fieldname','temperatures_lgm','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1) + WriteData(fid,'object',self,'class','smb','fieldname','precipitations_presentday','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1) + WriteData(fid,'object',self,'class','smb','fieldname','precipitations_lgm','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1) + WriteData(fid,'object',self,'class','smb','fieldname','delta18o_surface','format','DoubleMat','mattype',1,'timeserieslength',2) + WriteData(fid,'object',self,'class','smb','fieldname','delta18o','format','DoubleMat','mattype',1,'timeserieslength',2) + WriteData(fid,'object',self,'class','smb','fieldname','Tdiff','format','DoubleMat','mattype',1,'timeserieslength',2) + WriteData(fid,'object',self,'class','smb','fieldname','sealev','format','DoubleMat','mattype',1,'timeserieslength',2) + elif self.ismungsm: + WriteData(fid,'object',self,'class','smb','fieldname','temperatures_presentday','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1) + WriteData(fid,'object',self,'class','smb','fieldname','temperatures_lgm','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1) + WriteData(fid,'object',self,'class','smb','fieldname','precipitations_presentday','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1) + WriteData(fid,'object',self,'class','smb','fieldname','precipitations_lgm','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1) + WriteData(fid,'object',self,'class','smb','fieldname','Pfac','format','DoubleMat','mattype',1,'timeserieslength',2) + WriteData(fid,'object',self,'class','smb','fieldname','Tdiff','format','DoubleMat','mattype',1,'timeserieslength',2) + WriteData(fid,'object',self,'class','smb','fieldname','sealev','format','DoubleMat','mattype',1,'timeserieslength',2) + + #process requested outputs + outputs = self.requested_outputs + indices = [i for i, x in enumerate(outputs) if x == 'default'] + if len(indices) > 0: + outputscopy=outputs[0:max(0,indices[0]-1)]+self.defaultoutputs(md)+outputs[indices[0]+1:] + outputs =outputscopy + WriteData(fid,'data',outputs,'enum',SmbRequestedOutputsEnum(),'format','StringArray') + + # }}} Index: ../trunk-jpl/src/py3/classes/bamggeom.py =================================================================== --- ../trunk-jpl/src/py3/classes/bamggeom.py (revision 0) +++ ../trunk-jpl/src/py3/classes/bamggeom.py (revision 19895) @@ -0,0 +1,45 @@ +import numpy + +class bamggeom(object): + """ + BAMGGEOM class definition + + Usage: + bamggeom(varargin) + """ + + def __init__(self,*args): # {{{ + self.Vertices=numpy.empty((0,3)) + self.Edges=numpy.empty((0,3)) + self.TangentAtEdges=numpy.empty((0,4)) + self.Corners=numpy.empty((0,1)) + self.RequiredVertices=numpy.empty((0,1)) + self.RequiredEdges=numpy.empty((0,1)) + self.CrackedEdges=numpy.empty((0,0)) + self.SubDomains=numpy.empty((0,4)) + + if not len(args): + # if no input arguments, create a default object + pass + + elif 
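The '#process requested outputs' idiom above recurs in most classes of this revision: it splices the class defaults in place of the 'default' placeholder while keeping explicitly requested entries on either side. A standalone sketch with hypothetical output names:

    # Expand the 'default' placeholder in a requested-outputs list (sketch;
    # the names are hypothetical stand-ins for self.defaultoutputs(md)).
    outputs  = ['SmbMassBalance','default','SmbRunoff']
    defaults = ['SmbMonthlytemperatures','SmbPrecipitation']
    indices  = [i for i,x in enumerate(outputs) if x == 'default']
    if indices:
        i = indices[0]
        outputs = outputs[0:i] + defaults + outputs[i+1:]
    # -> ['SmbMassBalance','SmbMonthlytemperatures','SmbPrecipitation','SmbRunoff']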
Index: ../trunk-jpl/src/py3/classes/bamggeom.py
===================================================================
--- ../trunk-jpl/src/py3/classes/bamggeom.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/bamggeom.py	(revision 19895)
@@ -0,0 +1,45 @@
+import numpy
+
+class bamggeom(object):
+    """
+    BAMGGEOM class definition
+
+    Usage:
+        bamggeom(varargin)
+    """
+
+    def __init__(self,*args):    # {{{
+        self.Vertices=numpy.empty((0,3))
+        self.Edges=numpy.empty((0,3))
+        self.TangentAtEdges=numpy.empty((0,4))
+        self.Corners=numpy.empty((0,1))
+        self.RequiredVertices=numpy.empty((0,1))
+        self.RequiredEdges=numpy.empty((0,1))
+        self.CrackedEdges=numpy.empty((0,0))
+        self.SubDomains=numpy.empty((0,4))
+
+        if not len(args):
+            # if no input arguments, create a default object
+            pass
+
+        elif len(args) == 1:
+            arg=args[0]    # expects a dict whose keys match the fields above
+            for field in list(arg.keys()):
+                if field in vars(self):
+                    setattr(self,field,arg[field])
+
+        else:
+            raise TypeError("bamggeom constructor error message: unknown type of constructor call")
+    # }}}
+    def __repr__(self):    # {{{
+        s ="class '%s' object '%s' = \n" % (type(self),'self')
+        s+="    Vertices: %s\n" % str(self.Vertices)
+        s+="    Edges: %s\n" % str(self.Edges)
+        s+="    TangentAtEdges: %s\n" % str(self.TangentAtEdges)
+        s+="    Corners: %s\n" % str(self.Corners)
+        s+="    RequiredVertices: %s\n" % str(self.RequiredVertices)
+        s+="    RequiredEdges: %s\n" % str(self.RequiredEdges)
+        s+="    CrackedEdges: %s\n" % str(self.CrackedEdges)
+        s+="    SubDomains: %s\n" % str(self.SubDomains)
+        return s
+    # }}}
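A short sketch of the two constructor paths bamggeom accepts: no arguments for an empty geometry, or a single dict whose keys match the fields above. Values below are hypothetical, and the module is assumed to be importable from the classes directory:

    import numpy
    from bamggeom import bamggeom

    geom = bamggeom()    # default: empty (0 x n) arrays for every field
    geom = bamggeom({'Vertices': numpy.array([[0.,0.,1.],[1.,0.,1.],[0.,1.,1.]]),
                     'Edges':    numpy.array([[1,2,1],[2,3,1],[3,1,1]])})
    # keys that do not match an existing field are silently ignored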
Index: ../trunk-jpl/src/py3/classes/damage.py
===================================================================
--- ../trunk-jpl/src/py3/classes/damage.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/damage.py	(revision 19895)
@@ -0,0 +1,173 @@
+from fielddisplay import fielddisplay
+from project3d import project3d
+from EnumDefinitions import *
+from StringToEnum import StringToEnum
+from checkfield import checkfield
+from WriteData import WriteData
+import MatlabFuncs as m
+
+class damage(object):
+    """
+    DAMAGE class definition
+
+    Usage:
+        damage=damage()
+    """
+
+    def __init__(self,*args):    # {{{
+
+        #damage:
+        self.isdamage   = 0.
+        self.D          = float('NaN')
+        self.law        = float('NaN')
+        self.spcdamage  = float('NaN')
+        self.max_damage = float('NaN')
+
+        #numerical
+        self.stabilization = float('NaN')
+        self.maxiter       = float('NaN')
+        self.elementinterp = ''
+
+        #general parameters for evolution law:
+        self.stress_threshold = float('NaN')
+        self.kappa            = float('NaN')
+        self.c1               = float('NaN')
+        self.c2               = float('NaN')
+        self.c3               = float('NaN')
+        self.c4               = float('NaN')
+        self.healing          = float('NaN')
+        self.equiv_stress     = float('NaN')
+        self.requested_outputs = []
+
+        if not len(args):
+            self.setdefaultparameters()
+        else:
+            raise RuntimeError("constructor not supported")
+    # }}}
+    def __repr__(self):    # {{{
+        s ='   Damage:\n'
+
+        s+="%s\n" % fielddisplay(self,"isdamage","is damage mechanics being used? [0 (default) or 1]")
+        if self.isdamage:
+            s+="%s\n" % fielddisplay(self,"D","damage tensor (scalar for now)")
+            s+="%s\n" % fielddisplay(self,"law","damage law ['0: analytical','1: pralong']")
+            s+="%s\n" % fielddisplay(self,"spcdamage","damage constraints (NaN means no constraint)")
+            s+="%s\n" % fielddisplay(self,"max_damage","maximum possible damage (0<=max_damage<1)")
+            s+="%s\n" % fielddisplay(self,"stabilization","0: no, 1: artificial_diffusivity, 2: SUPG (not working), 4: Flux corrected transport")
+            s+="%s\n" % fielddisplay(self,"maxiter","maximum number of nonlinear iterations")
+            s+="%s\n" % fielddisplay(self,"elementinterp","interpolation scheme for finite elements [''P1'',''P2'']")
+            s+="%s\n" % fielddisplay(self,"stress_threshold","stress threshold for damage initiation [Pa]")
+            s+="%s\n" % fielddisplay(self,"kappa","ductility parameter for stress softening and damage [>1]")
+            s+="%s\n" % fielddisplay(self,"c1","damage parameter 1")
+            s+="%s\n" % fielddisplay(self,"c2","damage parameter 2")
+            s+="%s\n" % fielddisplay(self,"c3","damage parameter 3")
+            s+="%s\n" % fielddisplay(self,"c4","damage parameter 4")
+            s+="%s\n" % fielddisplay(self,"healing","damage healing parameter")
+            s+="%s\n" % fielddisplay(self,"equiv_stress","0: von Mises, 1: max principal")
+            s+="%s\n" % fielddisplay(self,'requested_outputs','additional outputs requested')
+
+        return s
+    # }}}
+    def extrude(self,md):    # {{{
+        self.D=project3d(md,'vector',self.D,'type','node')
+        self.spcdamage=project3d(md,'vector',self.spcdamage,'type','node')
+        return self
+    #}}}
+    def setdefaultparameters(self):    # {{{
+
+        #damage parameters:
+        self.isdamage=0
+        self.D=0
+        self.law=0
+
+        self.max_damage=1-1e-5    #if damage reaches 1, solve becomes singular, as viscosity becomes nil
+
+        #Type of stabilization used
+        self.stabilization=4
+
+        #Maximum number of iterations
+        self.maxiter=100
+
+        #finite element interpolation
+        self.elementinterp='P1'
+
+        #damage evolution parameters
+        self.stress_threshold=1.3e5
+        self.kappa=2.8
+        self.c1=0
+        self.c2=0
+        self.c3=0
+        self.c4=0
+        self.healing=0
+        self.equiv_stress=0
+
+        #output default:
+        self.requested_outputs=['default']
+
+        return self
+    # }}}
+    def defaultoutputs(self,md):    # {{{
+
+        if md.mesh.domaintype().lower()=='2dhorizontal':
+            outputs = ['DamageDbar']
+        else:
+            outputs = ['DamageD']
+        return outputs
+    #}}}
+    def checkconsistency(self,md,solution,analyses):    # {{{
+
+        md = checkfield(md,'fieldname','damage.isdamage','numel',[1],'values',[0,1])
+        if self.isdamage:
+            md = checkfield(md,'fieldname','damage.D','>=',0,'<=',self.max_damage,'size',[md.mesh.numberofvertices])
+            md = checkfield(md,'fieldname','damage.max_damage','<',1,'>=',0)
+            md = checkfield(md,'fieldname','damage.law','numel',[1],'values',[0,1,2,3])
+            md = checkfield(md,'fieldname','damage.spcdamage','timeseries',1)
+            md = checkfield(md,'fieldname','damage.stabilization','numel',[1],'values',[0,1,2,4])
+            md = checkfield(md,'fieldname','damage.maxiter','>=',0)
+            md = checkfield(md,'fieldname','damage.elementinterp','values',['P1','P2'])
+            md = checkfield(md,'fieldname','damage.stress_threshold','>=',0)
+            md = checkfield(md,'fieldname','damage.kappa','>',1)
+            md = checkfield(md,'fieldname','damage.healing','>=',0)
+            md = checkfield(md,'fieldname','damage.c1','>=',0)
+            md = checkfield(md,'fieldname','damage.c2','>=',0)
+            md = checkfield(md,'fieldname','damage.c3','>=',0)
+            md = checkfield(md,'fieldname','damage.c4','>=',0)
+            md = checkfield(md,'fieldname','damage.equiv_stress','numel',[1],'values',[0,1])
+            md = checkfield(md,'fieldname','damage.requested_outputs','stringrow',1)
+        elif self.law != 0:
+            if (solution==DamageEvolutionSolutionEnum()):
+                raise RuntimeError('Invalid evolution law (md.damage.law) for a damage solution')
+
+        return md
+    # }}}
+    def marshall(self,md,fid):    # {{{
+
+        WriteData(fid,'object',self,'fieldname','isdamage','format','Boolean')
+        if self.isdamage:
+            WriteData(fid,'object',self,'fieldname','D','format','DoubleMat','mattype',1)
+            WriteData(fid,'object',self,'fieldname','law','format','Integer')
+            WriteData(fid,'object',self,'fieldname','spcdamage','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1)
+            WriteData(fid,'object',self,'fieldname','max_damage','format','Double')
+            WriteData(fid,'object',self,'fieldname','stabilization','format','Integer')
+            WriteData(fid,'object',self,'fieldname','maxiter','format','Integer')
+            WriteData(fid,'enum',DamageElementinterpEnum(),'data',StringToEnum(self.elementinterp)[0],'format','Integer')
+            WriteData(fid,'object',self,'fieldname','stress_threshold','format','Double')
+            WriteData(fid,'object',self,'fieldname','kappa','format','Double')
+            WriteData(fid,'object',self,'fieldname','c1','format','Double')
+            WriteData(fid,'object',self,'fieldname','c2','format','Double')
+            WriteData(fid,'object',self,'fieldname','c3','format','Double')
+            WriteData(fid,'object',self,'fieldname','c4','format','Double')
+            WriteData(fid,'object',self,'fieldname','healing','format','Double')
+            WriteData(fid,'object',self,'fieldname','equiv_stress','format','Integer')
+
+        #process requested outputs
+        outputs = self.requested_outputs
+        indices = [i for i, x in enumerate(outputs) if x == 'default']
+        if len(indices) > 0:
+            outputscopy=outputs[0:indices[0]]+self.defaultoutputs(md)+outputs[indices[0]+1:]
+            outputs    =outputscopy
+        WriteData(fid,'data',outputs,'enum',DamageEvolutionRequestedOutputsEnum(),'format','StringArray')
+    # }}}
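The setdefaultparameters comment above explains why max_damage is capped at 1-1e-5. A toy illustration, assuming a (1-D)-scaled viscosity, which is a common damage-mechanics convention (the actual scaling lives in the solver core, not in this hunk):

    # Why max_damage stays strictly below 1 (sketch, assuming mu_eff = (1-D)*mu):
    # at D == 1 the viscosity vanishes and the stiffness matrix becomes singular.
    def effective_viscosity(mu, D, max_damage=1-1e-5):
        D = min(D, max_damage)    # clamp, mirroring the default cap above
        return (1.0 - D) * mu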
string="%s\n%s"%(string,fieldstring(self,'cross_section_shape',"1: square-edged, 2: elliptical-edged surface")) + string="%s\n%s"%(string,fieldstring(self,'incomplete_adjoint','1: linear viscosity, 0: non-linear viscosity')) + string="%s\n%s"%(string,fieldstring(self,'control_parameters','ex: {''FrictionCoefficient''}, or {''MaterialsRheologyBbar''}')) + string="%s\n%s"%(string,fieldstring(self,'maxsteps','maximum number of iterations (gradient computation)')) + string="%s\n%s"%(string,fieldstring(self,'maxiter','maximum number of Function evaluation (forward run)')) + string="%s\n%s"%(string,fieldstring(self,'fatol','convergence criterion: f(X)-f(X*) (X: current iteration, X*: "true" solution, f: cost function)')) + string="%s\n%s"%(string,fieldstring(self,'frtol','convergence criterion: |f(X)-f(X*)|/|f(X*)|')) + string="%s\n%s"%(string,fieldstring(self,'gatol','convergence criterion: ||g(X)|| (g: gradient of the cost function)')) + string="%s\n%s"%(string,fieldstring(self,'grtol','convergence criterion: ||g(X)||/|f(X)|')) + string="%s\n%s"%(string,fieldstring(self,'gttol','convergence criterion: ||g(X)||/||g(X0)|| (g(X0): gradient at initial guess X0)')) + string="%s\n%s"%(string,fieldstring(self,'algorithm','minimization algorithm: ''tao_blmvm'', ''tao_cg'', ''tao_lmvm''')) + string="%s\n%s"%(string,fieldstring(self,'cost_functions','indicate the type of response for each optimization step')) + string="%s\n%s"%(string,fieldstring(self,'cost_functions_coefficients','cost_functions_coefficients applied to the misfit of each vertex and for each control_parameter')) + string="%s\n%s"%(string,fieldstring(self,'min_parameters','absolute minimum acceptable value of the inversed parameter on each vertex')) + string="%s\n%s"%(string,fieldstring(self,'max_parameters','absolute maximum acceptable value of the inversed parameter on each vertex')) + string="%s\n%s"%(string,fieldstring(self,'vx_obs','observed velocity x component [m/yr]')) + string="%s\n%s"%(string,fieldstring(self,'vy_obs','observed velocity y component [m/yr]')) + string="%s\n%s"%(string,fieldstring(self,'vel_obs','observed velocity magnitude [m/yr]')) + string="%s\n%s"%(string,fieldstring(self,'thickness_obs','observed thickness [m]')) + string="%s\n%s"%(string,fieldstring(self,'surface_obs','observed surface elevation [m]')) + string="%s\n%s"%(string,'Available cost functions:') + string="%s\n%s"%(string, ' 101: SurfaceAbsVelMisfit') + string="%s\n%s"%(string, ' 102: SurfaceRelVelMisfit') + string="%s\n%s"%(string, ' 103: SurfaceLogVelMisfit') + string="%s\n%s"%(string, ' 104: SurfaceLogVxVyMisfit') + string="%s\n%s"%(string, ' 105: SurfaceAverageVelMisfit') + string="%s\n%s"%(string, ' 201: ThicknessAbsMisfit') + string="%s\n%s"%(string, ' 501: DragCoefficientAbsGradient') + string="%s\n%s"%(string, ' 502: RheologyBbarAbsGradient') + string="%s\n%s"%(string, ' 503: ThicknessAbsGradient') + return string + def setdefaultparameters(self): + + #default is incomplete adjoint for now + self.incomplete_adjoint=1 + + #parameter to be inferred by control methods (only + #drag and B are supported yet) + self.control_parameters=['FrictionCoefficient'] + + #number of iterations and steps + self.maxsteps=20; + self.maxiter =30; + + #default tolerances + self.fatol = 0; + self.frtol = 0; + self.gatol = 0; + self.grtol = 0; + self.gttol = 1e-4; + + #minimization algorithm + PETSCMAJOR = IssmConfig('_PETSC_MAJOR_') + PETSCMINOR = IssmConfig('_PETSC_MINOR_') + if(PETSCMAJOR>3 or (PETSCMAJOR==3 and PETSCMINOR>=5)): + self.algorithm = 'blmvm'; + 
else: + self.algorithm = 'tao_blmvm'; + + #several responses can be used: + self.cost_functions=101; + + return self + + def extrude(self,md): + self.vx_obs=project3d(md,'vector',self.vx_obs,'type','node') + self.vy_obs=project3d(md,'vector',self.vy_obs,'type','node') + self.vel_obs=project3d(md,'vector',self.vel_obs,'type','node') + self.thickness_obs=project3d(md,'vector',self.thickness_obs,'type','node') + + if numel(self.cost_functions_coefficients) > 1: + self.cost_functions_coefficients=project3d(md,'vector',self.cost_functions_coefficients,'type','node') + + if numel(self.min_parameters) > 1: + self.min_parameters=project3d(md,'vector',self.min_parameters,'type','node') + + if numel(self.max_parameters)>1: + self.max_parameters=project3d(md,'vector',self.max_parameters,'type','node') + + return self + + def checkconsistency(self,md,solution,analyses): + if not self.control: + return md + if not IssmConfig('_HAVE_TAO_'): + md = checkmessage(md,['TAO has not been installed, ISSM needs to be reconfigured and recompiled with TAO']) + + + num_controls= numpy.numel(md.inversion.control_parameters) + num_costfunc= numpy.size(md.inversion.cost_functions,2) + + md = checkfield(md,'fieldname','inversion.iscontrol','values',[0, 1]) + md = checkfield(md,'fieldname','inversion.incomplete_adjoint','values',[0, 1]) + md = checkfield(md,'fieldname','inversion.control_parameters','cell',1,'values',supportedcontrols()) + md = checkfield(md,'fieldname','inversion.maxsteps','numel',1,'>=',0) + md = checkfield(md,'fieldname','inversion.maxiter','numel',1,'>=',0) + md = checkfield(md,'fieldname','inversion.fatol','numel',1,'>=',0) + md = checkfield(md,'fieldname','inversion.frtol','numel',1,'>=',0) + md = checkfield(md,'fieldname','inversion.gatol','numel',1,'>=',0) + md = checkfield(md,'fieldname','inversion.grtol','numel',1,'>=',0) + md = checkfield(md,'fieldname','inversion.gttol','numel',1,'>=',0) + + + PETSCMAJOR = IssmConfig('_PETSC_MAJOR_') + PETSCMINOR = IssmConfig('_PETSC_MINOR_') + if(PETSCMAJOR>3 or (PETSCMAJOR==3 and PETSCMINOR>=5)): + md = checkfield(md,'fieldname','inversion.algorithm','values',{'blmvm','cg','lmvm'}) + else: + md = checkfield(md,'fieldname','inversion.algorithm','values',{'tao_blmvm','tao_cg','tao_lmvm'}) + + + md = checkfield(md,'fieldname','inversion.cost_functions','size',[1, num_costfunc],'values',supportedcostfunctions()) + md = checkfield(md,'fieldname','inversion.cost_functions_coefficients','size',[md.mesh.numberofvertices, num_costfunc],'>=',0) + md = checkfield(md,'fieldname','inversion.min_parameters','size',[md.mesh.numberofvertices, num_controls]) + md = checkfield(md,'fieldname','inversion.max_parameters','size',[md.mesh.numberofvertices, num_controls]) + + + if solution==BalancethicknessSolutionEnum(): + md = checkfield(md,'fieldname','inversion.thickness_obs','size',[md.mesh.numberofvertices, 1],'NaN',1) + elif solution==BalancethicknessSoftSolutionEnum(): + md = checkfield(md,'fieldname','inversion.thickness_obs','size',[md.mesh.numberofvertices, 1],'NaN',1) + else: + md = checkfield(md,'fieldname','inversion.vx_obs','size',[md.mesh.numberofvertices, 1],'NaN',1) + md = checkfield(md,'fieldname','inversion.vy_obs','size',[md.mesh.numberofvertices, 1],'NaN',1) + + def marshall(self, md, fid): + + yts=365.0*24.0*3600.0; + WriteData(fid,'object',self,'class','inversion','fieldname','iscontrol','format','Boolean') + WriteData(fid,'enum',InversionTypeEnum(),'data',1,'format','Integer') + if not self.iscontrol: + return + 
WriteData(fid,'object',self,'class','inversion','fieldname','incomplete_adjoint','format','Boolean') + WriteData(fid,'object',self,'class','inversion','fieldname','maxsteps','format','Integer') + WriteData(fid,'object',self,'class','inversion','fieldname','maxiter','format','Integer') + WriteData(fid,'object',self,'class','inversion','fieldname','fatol','format','Double') + WriteData(fid,'object',self,'class','inversion','fieldname','frtol','format','Double') + WriteData(fid,'object',self,'class','inversion','fieldname','gatol','format','Double') + WriteData(fid,'object',self,'class','inversion','fieldname','grtol','format','Double') + WriteData(fid,'object',self,'class','inversion','fieldname','gttol','format','Double') + WriteData(fid,'object',self,'class','inversion','fieldname','algorithm','format','String') + WriteData(fid,'object',self,'class','inversion','fieldname','cost_functions_coefficients','format','DoubleMat','mattype',1) + WriteData(fid,'object',self,'class','inversion','fieldname','min_parameters','format','DoubleMat','mattype',3) + WriteData(fid,'object',self,'class','inversion','fieldname','max_parameters','format','DoubleMat','mattype',3) + WriteData(fid,'object',self,'class','inversion','fieldname','vx_obs','format','DoubleMat','mattype',1,'scale',1./yts) + WriteData(fid,'object',self,'class','inversion','fieldname','vy_obs','format','DoubleMat','mattype',1,'scale',1./yts) + WriteData(fid,'object',self,'class','inversion','fieldname','vz_obs','format','DoubleMat','mattype',1,'scale',1./yts) + WriteData(fid,'object',self,'class','inversion','fieldname','thickness_obs','format','DoubleMat','mattype',1) + WriteData(fid,'object',self,'class','inversion','fieldname','surface_obs','format','DoubleMat','mattype',1) + + #process control parameters + num_control_parameters = numpy.numel(self.control_parameters) + data = numpy.array([StringToEnum(self.control_parameter[0]) for control_parameter in self.control_parameters]).reshape(1,-1) + WriteData(fid,'data',data,'enum',InversionControlParametersEnum(),'format','DoubleMat','mattype',3) + WriteData(fid,'data',num_control_parameters,'enum',InversionNumControlParametersEnum(),'format','Integer') + + #process cost functions + num_cost_functions = numpy.size(self.cost_functions,2) + data= marshallcostfunctions(self.cost_functions) + WriteData(fid,'data',data,'enum',InversionCostFunctionsEnum(),'format','DoubleMat','mattype',3) + WriteData(fid,'data',num_cost_functions,'enum',InversionNumCostFunctionsEnum(),'format','Integer') Property changes on: ../trunk-jpl/src/py3/classes/taoinversion.py ___________________________________________________________________ Added: svn:executable + * Index: ../trunk-jpl/src/py3/classes/masstransport.py =================================================================== --- ../trunk-jpl/src/py3/classes/masstransport.py (revision 0) +++ ../trunk-jpl/src/py3/classes/masstransport.py (revision 19895) @@ -0,0 +1,102 @@ +from fielddisplay import fielddisplay +from project3d import project3d +from EnumDefinitions import * +from StringToEnum import StringToEnum +from checkfield import checkfield +from WriteData import WriteData + +class masstransport(object): + """ + MASSTRANSPORT class definition + + Usage: + masstransport=masstransport(); + """ + + def __init__(self): # {{{ + self.spcthickness = float('NaN') + self.isfreesurface = 0 + self.min_thickness = 0. 
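The algorithm names in setdefaultparameters above are gated on the PETSc version because the 'tao_' prefix was dropped when TAO was folded into PETSc 3.5. The check, condensed:

    # Condensed version gate from setdefaultparameters() above.
    from IssmConfig import IssmConfig
    major = IssmConfig('_PETSC_MAJOR_')
    minor = IssmConfig('_PETSC_MINOR_')
    algorithm = 'blmvm' if (major > 3 or (major == 3 and minor >= 5)) else 'tao_blmvm'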
Index: ../trunk-jpl/src/py3/classes/masstransport.py
===================================================================
--- ../trunk-jpl/src/py3/classes/masstransport.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/masstransport.py	(revision 19895)
@@ -0,0 +1,102 @@
+from fielddisplay import fielddisplay
+from project3d import project3d
+from EnumDefinitions import *
+from StringToEnum import StringToEnum
+from checkfield import checkfield
+from WriteData import WriteData
+
+class masstransport(object):
+    """
+    MASSTRANSPORT class definition
+
+    Usage:
+        masstransport=masstransport();
+    """
+
+    def __init__(self):    # {{{
+        self.spcthickness           = float('NaN')
+        self.isfreesurface          = 0
+        self.min_thickness          = 0.
+        self.hydrostatic_adjustment = 0
+        self.stabilization          = 0
+        self.vertex_pairing         = float('NaN')
+        self.penalty_factor         = 0
+        self.requested_outputs      = []
+
+        #set defaults
+        self.setdefaultparameters()
+    #}}}
+    def __repr__(self):    # {{{
+        string='   Masstransport solution parameters:'
+        string="%s\n%s"%(string,fielddisplay(self,'spcthickness','thickness constraints (NaN means no constraint) [m]'))
+        string="%s\n%s"%(string,fielddisplay(self,'isfreesurface','do we use free surfaces (FS only) or mass conservation'))
+        string="%s\n%s"%(string,fielddisplay(self,'min_thickness','minimum ice thickness allowed [m]'))
+        string="%s\n%s"%(string,fielddisplay(self,'hydrostatic_adjustment','adjustment of ice shelves surface and bed elevations: ''Incremental'' or ''Absolute'' '))
+        string="%s\n%s"%(string,fielddisplay(self,'stabilization','0: no, 1: artificial_diffusivity, 2: streamline upwinding, 3: discontinuous Galerkin, 4: Flux Correction Transport'))
+        string="%s\n%s"%(string,fielddisplay(self,'requested_outputs','additional outputs requested'))
+
+        return string
+    #}}}
+    def extrude(self,md):    # {{{
+        self.spcthickness=project3d(md,'vector',self.spcthickness,'type','node')
+        return self
+    #}}}
+    def defaultoutputs(self,md):    # {{{
+
+        return ['Thickness','Surface','Base']
+    #}}}
+    def setdefaultparameters(self):    # {{{
+
+        #Type of stabilization to use 0:nothing 1:artificial_diffusivity 3:Discontinuous Galerkin
+        self.stabilization=1
+
+        #Factor applied to compute the penalties kappa=max(stiffness matrix)*10^penalty_factor
+        self.penalty_factor=3
+
+        #Minimum ice thickness that can be used
+        self.min_thickness=1
+
+        #Hydrostatic adjustment
+        self.hydrostatic_adjustment='Absolute'
+
+        #default output
+        self.requested_outputs=['default']
+        return self
+    #}}}
+    def checkconsistency(self,md,solution,analyses):    # {{{
+
+        #Early return
+        if (MasstransportAnalysisEnum() not in analyses) or (solution==TransientSolutionEnum() and not md.transient.ismasstransport):
+            return md
+
+        md = checkfield(md,'fieldname','masstransport.spcthickness','timeseries',1)
+        md = checkfield(md,'fieldname','masstransport.isfreesurface','values',[0,1])
+        md = checkfield(md,'fieldname','masstransport.hydrostatic_adjustment','values',['Absolute','Incremental'])
+        md = checkfield(md,'fieldname','masstransport.stabilization','values',[0,1,2,3,4])
+        md = checkfield(md,'fieldname','masstransport.min_thickness','>',0)
+        md = checkfield(md,'fieldname','masstransport.requested_outputs','stringrow',1)
+
+        return md
+    # }}}
+    def marshall(self,md,fid):    # {{{
+
+        yts=365.*24.*3600.
+
+        WriteData(fid,'object',self,'fieldname','spcthickness','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1)
+        WriteData(fid,'object',self,'fieldname','isfreesurface','format','Boolean')
+        WriteData(fid,'object',self,'fieldname','min_thickness','format','Double')
+        WriteData(fid,'data',StringToEnum(self.hydrostatic_adjustment)[0],'format','Integer','enum',MasstransportHydrostaticAdjustmentEnum())
+        WriteData(fid,'object',self,'fieldname','stabilization','format','Integer')
+        WriteData(fid,'object',self,'fieldname','vertex_pairing','format','DoubleMat','mattype',3)
+        WriteData(fid,'object',self,'fieldname','penalty_factor','format','Double')
+
+        #process requested outputs
+        outputs = self.requested_outputs
+        indices = [i for i, x in enumerate(outputs) if x == 'default']
+        if len(indices) > 0:
+            outputscopy=outputs[0:indices[0]]+self.defaultoutputs(md)+outputs[indices[0]+1:]
+            outputs    =outputscopy
+        WriteData(fid,'data',outputs,'enum',MasstransportRequestedOutputsEnum(),'format','StringArray')
    # }}}
Index: ../trunk-jpl/src/py3/classes/frictionweertman.py
===================================================================
--- ../trunk-jpl/src/py3/classes/frictionweertman.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/frictionweertman.py	(revision 19895)
@@ -0,0 +1,48 @@
+from fielddisplay import fielddisplay
+from project3d import project3d
+from EnumDefinitions import *
+from checkfield import checkfield
+from WriteData import WriteData
+
+class frictionweertman(object):
+    """
+    FRICTIONWEERTMAN class definition
+
+    Usage:
+        frictionweertman=frictionweertman();
+    """
+
+    def __init__(self):    # {{{
+        self.C = float('NaN')
+        self.m = float('NaN')
+
+        #set defaults
+        self.setdefaultparameters()
+    #}}}
+    def __repr__(self):    # {{{
+        string="Weertman sliding law parameters: Sigma_b = C^(-1/m) * |u_b|^(1/m-1) * u_b"
+
+        string="%s\n%s"%(string,fielddisplay(self,"C","friction coefficient [SI]"))
+        string="%s\n%s"%(string,fielddisplay(self,"m","m exponent"))
+        return string
+    #}}}
+    def setdefaultparameters(self):    # {{{
+        return self
+    #}}}
+    def checkconsistency(self,md,solution,analyses):    # {{{
+
+        #Early return
+        if StressbalanceAnalysisEnum() not in analyses and ThermalAnalysisEnum() not in analyses:
+            return md
+
+        md = checkfield(md,'fieldname','friction.C','timeseries',1,'NaN',1)
+        md = checkfield(md,'fieldname','friction.m','NaN',1,'size',[md.mesh.numberofelements])
+
+        return md
+    # }}}
+    def marshall(self,md,fid):    # {{{
+        WriteData(fid,'enum',FrictionLawEnum(),'data',2,'format','Integer')
+        WriteData(fid,'class','friction','object',self,'fieldname','C','format','DoubleMat','mattype',1)
+        WriteData(fid,'class','friction','object',self,'fieldname','m','format','DoubleMat','mattype',2)
+    # }}}
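The frictionweertman docstring above quotes the sliding law Sigma_b = C^(-1/m) * |u_b|^(1/m-1) * u_b. A direct numerical transcription with hypothetical parameter values:

    # Basal shear stress from the Weertman law quoted in __repr__ above
    # (illustrative values only; yts converts m/yr to m/s as elsewhere in this diff).
    yts = 365.0*24.0*3600.0
    C, m = 1.0e7, 3.0                 # hypothetical friction parameters [SI]
    ub = 100.0/yts                    # 100 m/yr basal speed, in m/s
    sigma_b = C**(-1.0/m) * abs(ub)**(1.0/m - 1.0) * ub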
Index: ../trunk-jpl/src/py3/classes/mesh3dprisms.py
===================================================================
--- ../trunk-jpl/src/py3/classes/mesh3dprisms.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/mesh3dprisms.py	(revision 19895)
@@ -0,0 +1,152 @@
+import numpy
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from checkfield import *
+import MatlabFuncs as m
+from WriteData import WriteData
+
+class mesh3dprisms(object):
+    """
+    MESH3DPRISMS class definition
+
+    Usage:
+        mesh3d=mesh3dprisms();
+    """
+
+    def __init__(self):    # {{{
+        self.x                = float('NaN')
+        self.y                = float('NaN')
+        self.z                = float('NaN')
+        self.elements         = float('NaN')
+        self.numberoflayers   = 0
+        self.numberofelements = 0
+        self.numberofvertices = 0
+
+        self.lat  = float('NaN')
+        self.long = float('NaN')
+        self.epsg = 0
+
+        self.vertexonbase     = float('NaN')
+        self.vertexonsurface  = float('NaN')
+        self.lowerelements    = float('NaN')
+        self.lowervertex      = float('NaN')
+        self.upperelements    = float('NaN')
+        self.uppervertex      = float('NaN')
+        self.vertexonboundary = float('NaN')
+
+        self.vertexconnectivity  = float('NaN')
+        self.elementconnectivity = float('NaN')
+        self.average_vertex_connectivity = 0
+
+        self.x2d = float('NaN')
+        self.y2d = float('NaN')
+        self.elements2d = float('NaN')
+        self.numberofvertices2d = 0
+        self.numberofelements2d = 0
+
+        self.extractedvertices = float('NaN')
+        self.extractedelements = float('NaN')
+
+        #set defaults
+        self.setdefaultparameters()
+    #}}}
+    def __repr__(self):    # {{{
+        string="   3D prism Mesh:"
+
+        string="%s\n%s"%(string,"\n      Elements and vertices of the original 2d mesh:")
+
+        string="%s\n%s"%(string,fielddisplay(self,"numberofelements2d","number of elements"))
+        string="%s\n%s"%(string,fielddisplay(self,"numberofvertices2d","number of vertices"))
+        string="%s\n%s"%(string,fielddisplay(self,"elements2d","vertex indices of the mesh elements"))
+        string="%s\n%s"%(string,fielddisplay(self,"x2d","vertices x coordinate [m]"))
+        string="%s\n%s"%(string,fielddisplay(self,"y2d","vertices y coordinate [m]"))
+
+        string="%s\n%s"%(string,"\n\n      Elements and vertices of the extruded 3d mesh:")
+        string="%s\n%s"%(string,fielddisplay(self,"numberofelements","number of elements"))
+        string="%s\n%s"%(string,fielddisplay(self,"numberofvertices","number of vertices"))
+        string="%s\n%s"%(string,fielddisplay(self,"elements","vertex indices of the mesh elements"))
+        string="%s\n%s"%(string,fielddisplay(self,"x","vertices x coordinate [m]"))
+        string="%s\n%s"%(string,fielddisplay(self,"y","vertices y coordinate [m]"))
+        string="%s\n%s"%(string,fielddisplay(self,"z","vertices z coordinate [m]"))
+
+        string="%s%s"%(string,"\n\n      Properties:")
+        string="%s\n%s"%(string,fielddisplay(self,"numberoflayers","number of extrusion layers"))
+        string="%s\n%s"%(string,fielddisplay(self,"vertexonbase","lower vertices flags list"))
+        string="%s\n%s"%(string,fielddisplay(self,"vertexonsurface","upper vertices flags list"))
+        string="%s\n%s"%(string,fielddisplay(self,"uppervertex","upper vertex list (-1 for vertex on the upper surface)"))
+        string="%s\n%s"%(string,fielddisplay(self,"upperelements","upper element list (-1 for element on the upper layer)"))
+        string="%s\n%s"%(string,fielddisplay(self,"lowervertex","lower vertex list (-1 for vertex on the lower surface)"))
+        string="%s\n%s"%(string,fielddisplay(self,"lowerelements","lower element list (-1 for element on the lower layer)"))
+        string="%s\n%s"%(string,fielddisplay(self,"vertexonboundary","vertices on the boundary of the domain flag list"))
+        string="%s\n%s"%(string,fielddisplay(self,"vertexconnectivity","list of vertices connected to vertex_i"))
+        string="%s\n%s"%(string,fielddisplay(self,"elementconnectivity","list of vertices connected to element_i"))
+        string="%s\n%s"%(string,fielddisplay(self,"average_vertex_connectivity","average number of vertices connected to one vertex"))
+
+        string="%s%s"%(string,"\n\n      Extracted model:")
+        string="%s\n%s"%(string,fielddisplay(self,"extractedvertices","vertices extracted from the model"))
+        string="%s\n%s"%(string,fielddisplay(self,"extractedelements","elements extracted from the model"))
+
+        string="%s%s"%(string,"\n\n      Projection:")
+        string="%s\n%s"%(string,fielddisplay(self,"lat","vertices latitude [degrees]"))
+        string="%s\n%s"%(string,fielddisplay(self,"long","vertices longitude [degrees]"))
+        string="%s\n%s"%(string,fielddisplay(self,"epsg","EPSG code (ex: 3413 for UPS Greenland, 3031 for UPS Antarctica)"))
+        return string
+    #}}}
+    def setdefaultparameters(self):    # {{{
+
+        #the connectivity is the averaged number of nodes linked to a
+        #given node through an edge. This connectivity is used to initially
+        #allocate memory to the stiffness matrix. A value of 25 seems to
+        #give a good memory/time ratio. This value can be checked in
+        #trunk/test/Miscellaneous/runme.m
+        self.average_vertex_connectivity=25
+
+        return self
+    #}}}
+    def checkconsistency(self,md,solution,analyses):    # {{{
+
+        md = checkfield(md,'fieldname','mesh.x','NaN',1,'size',[md.mesh.numberofvertices])
+        md = checkfield(md,'fieldname','mesh.y','NaN',1,'size',[md.mesh.numberofvertices])
+        md = checkfield(md,'fieldname','mesh.z','NaN',1,'size',[md.mesh.numberofvertices])
+        md = checkfield(md,'fieldname','mesh.elements','NaN',1,'>',0,'values',numpy.arange(1,md.mesh.numberofvertices+1))
+        md = checkfield(md,'fieldname','mesh.elements','size',[md.mesh.numberofelements,6])
+        if numpy.any(numpy.logical_not(m.ismember(numpy.arange(1,md.mesh.numberofvertices+1),md.mesh.elements))):
+            md.checkmessage("orphan nodes have been found. Check the mesh outline")
+        md = checkfield(md,'fieldname','mesh.numberoflayers','>=',0)
+        md = checkfield(md,'fieldname','mesh.numberofelements','>',0)
+        md = checkfield(md,'fieldname','mesh.numberofvertices','>',0)
+        md = checkfield(md,'fieldname','mesh.vertexonbase','size',[md.mesh.numberofvertices],'values',[0,1])
+        md = checkfield(md,'fieldname','mesh.vertexonsurface','size',[md.mesh.numberofvertices],'values',[0,1])
+        md = checkfield(md,'fieldname','mesh.average_vertex_connectivity','>=',24,'message',"'mesh.average_vertex_connectivity' should be at least 24 in 3d")
+
+        return md
+    # }}}
+    def domaintype(self):    # {{{
+        return "3D"
+    #}}}
+    def dimension(self):    # {{{
+        return 3
+    #}}}
+    def elementtype(self):    # {{{
+        return "Penta"
+    #}}}
+    def marshall(self,md,fid):    # {{{
+        WriteData(fid,'enum',DomainTypeEnum(),'data',StringToEnum("Domain"+self.domaintype())[0],'format','Integer')
+        WriteData(fid,'enum',DomainDimensionEnum(),'data',self.dimension(),'format','Integer')
+        WriteData(fid,'enum',MeshElementtypeEnum(),'data',StringToEnum(self.elementtype())[0],'format','Integer')
+        WriteData(fid,'object',self,'class','mesh','fieldname','x','format','DoubleMat','mattype',1)
+        WriteData(fid,'object',self,'class','mesh','fieldname','y','format','DoubleMat','mattype',1)
+        WriteData(fid,'object',self,'class','mesh','fieldname','z','format','DoubleMat','mattype',1)
+        WriteData(fid,'object',self,'class','mesh','fieldname','elements','format','DoubleMat','mattype',2)
+        WriteData(fid,'object',self,'class','mesh','fieldname','numberoflayers','format','Integer')
+        WriteData(fid,'object',self,'class','mesh','fieldname','numberofelements','format','Integer')
+        WriteData(fid,'object',self,'class','mesh','fieldname','numberofvertices','format','Integer')
+        WriteData(fid,'object',self,'class','mesh','fieldname','vertexonbase','format','BooleanMat','mattype',1)
+        WriteData(fid,'object',self,'class','mesh','fieldname','vertexonsurface','format','BooleanMat','mattype',1)
+        WriteData(fid,'object',self,'class','mesh','fieldname','lowerelements','format','DoubleMat','mattype',2)
+        WriteData(fid,'object',self,'class','mesh','fieldname','upperelements','format','DoubleMat','mattype',2)
+        WriteData(fid,'object',self,'class','mesh','fieldname','average_vertex_connectivity','format','Integer')
+        WriteData(fid,'object',self,'class','mesh','fieldname','elements2d','format','DoubleMat','mattype',3)
+        WriteData(fid,'object',self,'class','mesh','fieldname','numberofvertices2d','format','Integer')
+        WriteData(fid,'object',self,'class','mesh','fieldname','numberofelements2d','format','Integer')
+    # }}}
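A sketch of the extrusion bookkeeping mesh3dprisms carries (vertexonbase/vertexonsurface). It assumes vertices are numbered layer by layer from the base, which matches how the flags are described above but is not shown in this hunk:

    # Hypothetical layered numbering: nv2d vertices per layer, base layer first.
    import numpy
    nv2d, nlayers = 4, 3
    nv = nv2d*nlayers
    vertexonbase = numpy.zeros(nv)
    vertexonbase[0:nv2d] = 1            # first layer sits on the base
    vertexonsurface = numpy.zeros(nv)
    vertexonsurface[nv-nv2d:nv] = 1     # last layer sits on the surface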
Index: ../trunk-jpl/src/py3/classes/adinversion.py
===================================================================
--- ../trunk-jpl/src/py3/classes/adinversion.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/adinversion.py	(revision 19895)
@@ -0,0 +1,202 @@
+"""
+== == == == == == == == == == == == == == == == == == ==
+Auto generated python script for ISSM: /home/andrei/issm/trunk-jpl/src/m/classes/adinversion.m
+Created on 2015-05-15 via translateToPy.py Ver 1.0 by andrei
+== == == == == == == == == == == == == == == == == == ==
+
+Matlab script conversion into python
+translateToPy.py Author: Michael Pellegrin
+translateToPy.py Date: 09/24/12
+== == == == == == == == == == == == == == == == == == ==
+"""
+
+from MatlabFuncs import *
+from EnumDefinitions import *
+from numpy import *
+import numpy
+from fielddisplay import fielddisplay
+from checkfield import checkfield
+from WriteData import WriteData
+from StringToEnum import StringToEnum
+from IssmConfig import IssmConfig
+
+# ADINVERSION class definition
+#
+# Usage:
+#    adinversion=adinversion();
+
+class adinversion:
+    def __init__(self):
+        self.iscontrol               = 0
+        self.control_parameters      = float('NaN')
+        self.control_scaling_factors = float('NaN')
+        self.maxsteps                = 0
+        self.maxiter                 = 0
+        self.dxmin                   = 0
+        self.gttol                   = 0
+        self.cost_functions          = float('NaN')
+        self.cost_functions_coefficients = float('NaN')
+        self.min_parameters          = float('NaN')
+        self.max_parameters          = float('NaN')
+        self.vx_obs                  = float('NaN')
+        self.vy_obs                  = float('NaN')
+        self.vz_obs                  = float('NaN')
+        self.vel_obs                 = float('NaN')
+        self.thickness_obs           = float('NaN')
+        self.surface_obs             = float('NaN')
+
+    def setdefaultparameters(self):
+
+        self.control_parameters=['FrictionCoefficient']
+
+        # Scaling factor for each control
+        self.control_scaling_factors=1
+
+        # number of iterations
+        self.maxsteps=20
+        self.maxiter=40
+
+        # several responses can be used:
+        self.cost_functions=101
+
+        # m1qn3 parameters
+        self.dxmin = 0.1
+        self.gttol = 1e-4
+
+        return self
+
+    def checkconsistency(self, md, solution, analyses):
+
+        # Early return
+        if not self.iscontrol:
+            return md
+
+        if not IssmConfig('_HAVE_M1QN3_'):
+            md = checkmessage(md,['M1QN3 has not been installed, ISSM needs to be reconfigured and recompiled with AD'])
+
+        num_controls=numpy.size(md.inversion.control_parameters)
+        num_costfunc=numpy.size(md.inversion.cost_functions)
+
+        md = checkfield(md,'fieldname','inversion.iscontrol','values',[0, 1])
+        md = checkfield(md,'fieldname','inversion.control_parameters','cell',1,'values',\
+            ['BalancethicknessThickeningRate','FrictionCoefficient','MaterialsRheologyBbar','DamageDbar',\
+             'Vx','Vy','Thickness','BalancethicknessOmega','BalancethicknessApparentMassbalance'])
+        md = checkfield(md,'fieldname','inversion.control_scaling_factors','size',[1, num_controls],'>',0,'NaN',1)
+        md = checkfield(md,'fieldname','inversion.maxsteps','numel',1,'>=',0)
+        md = checkfield(md,'fieldname','inversion.maxiter','numel',1,'>=',0)
+        md = checkfield(md,'fieldname','inversion.dxmin','numel',1,'>',0)
+        md = checkfield(md,'fieldname','inversion.gttol','numel',1,'>',0)
+        md = checkfield(md,'fieldname','inversion.cost_functions','size',[1, num_costfunc],'values', [i for i in range(101,106)]+[201]+[i for i in range(501,507)]+[i for i in range(601,605)]+[i for i in range(1001, 1011)])
+        md = checkfield(md,'fieldname','inversion.cost_functions_coefficients','size',[md.mesh.numberofvertices, num_costfunc],'>=',0)
+        md = checkfield(md,'fieldname','inversion.min_parameters','size',[md.mesh.numberofvertices, num_controls])
+        md = checkfield(md,'fieldname','inversion.max_parameters','size',[md.mesh.numberofvertices, num_controls])
+
+        if solution==BalancethicknessSolutionEnum():
+            md = checkfield(md,'fieldname','inversion.thickness_obs','size',[md.mesh.numberofvertices, 1],'NaN',1)
+            md = checkfield(md,'fieldname','inversion.surface_obs','size',[md.mesh.numberofvertices, 1],'NaN',1)
+        elif solution==BalancethicknessSoftSolutionEnum():
+            md = checkfield(md,'fieldname','inversion.thickness_obs','size',[md.mesh.numberofvertices, 1],'NaN',1)
+        else:
+            md = checkfield(md,'fieldname','inversion.vx_obs','size',[md.mesh.numberofvertices, 1],'NaN',1)
+            if not strcmp(md.mesh.domaintype(),'2Dvertical'):
+                md = checkfield(md,'fieldname','inversion.vy_obs','size',[md.mesh.numberofvertices, 1],'NaN',1)
+        return md
+
+    def __repr__(self):
+        string = '   adinversion parameters:'
+        string ="%s\n%s"%(string, fielddisplay(self,'iscontrol','is inversion activated?'))
+        string ="%s\n%s"%(string, fielddisplay(self,'control_parameters','ex: [''FrictionCoefficient''], or [''MaterialsRheologyBbar'']'))
+        string ="%s\n%s"%(string, fielddisplay(self,'control_scaling_factors','order of magnitude of each control (useful for multi-parameter optimization)'))
+        string ="%s\n%s"%(string, fielddisplay(self,'maxsteps','maximum number of iterations (gradient computation)'))
+        string ="%s\n%s"%(string, fielddisplay(self,'maxiter','maximum number of Function evaluation (forward run)'))
+        string ="%s\n%s"%(string, fielddisplay(self,'dxmin','convergence criterion: two points less than dxmin from each other (sup-norm) are considered identical'))
+        string ="%s\n%s"%(string, fielddisplay(self,'gttol','convergence criterion: ||g(X)||/||g(X0)|| (g(X0): gradient at initial guess X0)'))
+        string ="%s\n%s"%(string, fielddisplay(self,'cost_functions','indicate the type of response for each optimization step'))
+        string ="%s\n%s"%(string, fielddisplay(self,'cost_functions_coefficients','cost_functions_coefficients applied to the misfit of each vertex and for each control_parameter'))
+        string ="%s\n%s"%(string, fielddisplay(self,'min_parameters','absolute minimum acceptable value of the inversed parameter on each vertex'))
+        string ="%s\n%s"%(string, fielddisplay(self,'max_parameters','absolute maximum acceptable value of the inversed parameter on each vertex'))
+        string ="%s\n%s"%(string, fielddisplay(self,'vx_obs','observed velocity x component [m/yr]'))
+        string ="%s\n%s"%(string, fielddisplay(self,'vy_obs','observed velocity y component [m/yr]'))
+        string ="%s\n%s"%(string, fielddisplay(self,'vel_obs','observed velocity magnitude [m/yr]'))
+        string ="%s\n%s"%(string, fielddisplay(self,'thickness_obs','observed thickness [m]'))
+        string ="%s\n%s"%(string, fielddisplay(self,'surface_obs','observed surface elevation [m]'))
+        string ="%s\n%s"%(string,'Available cost functions:')
+        string ="%s\n%s"%(string,'   101: SurfaceAbsVelMisfit')
+        string ="%s\n%s"%(string,'   102: SurfaceRelVelMisfit')
+        string ="%s\n%s"%(string,'   103: SurfaceLogVelMisfit')
+        string ="%s\n%s"%(string,'   104: SurfaceLogVxVyMisfit')
+        string ="%s\n%s"%(string,'   105: SurfaceAverageVelMisfit')
+        string ="%s\n%s"%(string,'   201: ThicknessAbsMisfit')
+        string ="%s\n%s"%(string,'   501: DragCoefficientAbsGradient')
+        string ="%s\n%s"%(string,'   502: RheologyBbarAbsGradient')
+        string ="%s\n%s"%(string,'   503: ThicknessAbsGradient')
+
+        return string
+
+    def marshall(self,md,fid):
+
+        yts=365.0*24.0*3600.0
+
+        WriteData(fid,'object',self,'class','inversion','fieldname','iscontrol','format','Boolean')
+        WriteData(fid,'enum',InversionTypeEnum(),'data',4,'format','Integer')
+        if not self.iscontrol:
+            return
+        WriteData(fid,'object',self,'class','inversion','fieldname','control_scaling_factors','format','DoubleMat','mattype',3)
+        WriteData(fid,'object',self,'class','inversion','fieldname','maxsteps','format','Integer')
+        WriteData(fid,'object',self,'class','inversion','fieldname','maxiter','format','Integer')
+        WriteData(fid,'object',self,'class','inversion','fieldname','dxmin','format','Double')
+        WriteData(fid,'object',self,'class','inversion','fieldname','gttol','format','Double')
+        WriteData(fid,'object',self,'class','inversion','fieldname','cost_functions_coefficients','format','DoubleMat','mattype',1)
+        WriteData(fid,'object',self,'class','inversion','fieldname','min_parameters','format','DoubleMat','mattype',3)
+        WriteData(fid,'object',self,'class','inversion','fieldname','max_parameters','format','DoubleMat','mattype',3)
+        WriteData(fid,'object',self,'class','inversion','fieldname','vx_obs','format','DoubleMat','mattype',1,'scale',1./yts)
+        WriteData(fid,'object',self,'class','inversion','fieldname','vy_obs','format','DoubleMat','mattype',1,'scale',1./yts)
+        WriteData(fid,'object',self,'class','inversion','fieldname','vz_obs','format','DoubleMat','mattype',1,'scale',1./yts)
+        if(numpy.size(self.thickness_obs)==md.mesh.numberofelements):
+            mattype=2
+        else:
+            mattype=1
+
+        WriteData(fid,'object',self,'class','inversion','fieldname','thickness_obs','format','DoubleMat','mattype',mattype)
+        WriteData(fid,'object',self,'class','inversion','fieldname','surface_obs','format','DoubleMat','mattype',mattype)
+
+        #process control parameters
+        num_control_parameters = numpy.size(self.control_parameters)
+        data = numpy.array([StringToEnum(control_parameter)[0] for control_parameter in self.control_parameters]).reshape(1,-1)
+
+        WriteData(fid,'data',data,'enum',InversionControlParametersEnum(),'format','DoubleMat','mattype',3)
+        WriteData(fid,'data',num_control_parameters,'enum',InversionNumControlParametersEnum(),'format','Integer')
+
+        #process cost functions
+        num_cost_functions=numpy.size(self.cost_functions)
+        cost_functions=numpy.array(self.cost_functions)
+        data=cost_functions.copy()
+        data[cost_functions==101] =SurfaceAbsVelMisfitEnum()
+        data[cost_functions==102] =SurfaceRelVelMisfitEnum()
+        data[cost_functions==103] =SurfaceLogVelMisfitEnum()
+        data[cost_functions==104] =SurfaceLogVxVyMisfitEnum()
+        data[cost_functions==105] =SurfaceAverageVelMisfitEnum()
+        data[cost_functions==201] =ThicknessAbsMisfitEnum()
+        data[cost_functions==501] =DragCoefficientAbsGradientEnum()
+        data[cost_functions==502] =RheologyBbarAbsGradientEnum()
+        data[cost_functions==503] =ThicknessAbsGradientEnum()
+        data[cost_functions==504] =ThicknessAlongGradientEnum()
+        data[cost_functions==505] =ThicknessAcrossGradientEnum()
+        data[cost_functions==506] =BalancethicknessMisfitEnum()
+        data[cost_functions==601] =SurfaceAbsMisfitEnum()
+        data[cost_functions==1001]=Outputdefinition1Enum()
+        data[cost_functions==1002]=Outputdefinition2Enum()
+        data[cost_functions==1003]=Outputdefinition3Enum()
+        data[cost_functions==1004]=Outputdefinition4Enum()
+        data[cost_functions==1005]=Outputdefinition5Enum()
+        data[cost_functions==1006]=Outputdefinition6Enum()
+        data[cost_functions==1007]=Outputdefinition7Enum()
+        data[cost_functions==1008]=Outputdefinition8Enum()
+        data[cost_functions==1009]=Outputdefinition9Enum()
+        data[cost_functions==1010]=Outputdefinition10Enum()
+        WriteData(fid,'data',data,'enum',InversionCostFunctionsEnum(),'format','DoubleMat','mattype',3)
+        WriteData(fid,'data',num_cost_functions,'enum',InversionNumCostFunctionsEnum(),'format','Integer')
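The chain of masked assignments above maps numeric cost-function codes onto ISSM output enums one code at a time. An equivalent dict-based sketch (only three codes shown; enum constructors come from EnumDefinitions as elsewhere in this diff):

    # Equivalent dict-based mapping (sketch; cost_functions is a hypothetical
    # list of numeric codes).
    import numpy
    from EnumDefinitions import *
    code2enum = {101: SurfaceAbsVelMisfitEnum(),
                 201: ThicknessAbsMisfitEnum(),
                 501: DragCoefficientAbsGradientEnum()}
    cost_functions = [101, 501]
    data = numpy.array([code2enum[c] for c in cost_functions], dtype=float)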
Index: ../trunk-jpl/src/py3/classes/SMBgradients.py
===================================================================
--- ../trunk-jpl/src/py3/classes/SMBgradients.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/SMBgradients.py	(revision 19895)
@@ -0,0 +1,77 @@
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from checkfield import checkfield
+from WriteData import WriteData
+from project3d import project3d
+
+class SMBgradients(object):
+    """
+    SMBgradients Class definition
+
+    Usage:
+        SMBgradients=SMBgradients();
+    """
+
+    def __init__(self):    # {{{
+        self.href   = float('NaN')
+        self.smbref = float('NaN')
+        self.b_pos  = float('NaN')
+        self.b_neg  = float('NaN')
+        self.requested_outputs = []
+    #}}}
+    def __repr__(self):    # {{{
+        string="   surface forcings parameters:"
+
+        string="%s\n%s"%(string,fielddisplay(self,'href','reference elevation from which deviation is used to calculate SMB adjustment in smb gradients method'))
+        string="%s\n%s"%(string,fielddisplay(self,'smbref','reference smb from which deviation is calculated in smb gradients method'))
+        string="%s\n%s"%(string,fielddisplay(self,'b_pos','slope of hs - smb regression line for accumulation regime required if smb gradients is activated'))
+        string="%s\n%s"%(string,fielddisplay(self,'b_neg','slope of hs - smb regression line for ablation regime required if smb gradients is activated'))
+        string="%s\n%s"%(string,fielddisplay(self,'requested_outputs','additional outputs requested'))
+
+        return string
+    #}}}
+    def extrude(self,md):    # {{{
+
+        #Nothing for now
+        return self
+    #}}}
+    def defaultoutputs(self,md):    # {{{
+        return []
+    #}}}
+    def initialize(self,md):    # {{{
+
+        #Nothing for now
+
+        return self
+    #}}}
+    def checkconsistency(self,md,solution,analyses):    # {{{
+
+        if MasstransportAnalysisEnum() in analyses:
+            md = checkfield(md,'fieldname','smb.href','timeseries',1,'NaN',1)
+            md = checkfield(md,'fieldname','smb.smbref','timeseries',1,'NaN',1)
+            md = checkfield(md,'fieldname','smb.b_pos','timeseries',1,'NaN',1)
+            md = checkfield(md,'fieldname','smb.b_neg','timeseries',1,'NaN',1)
+
+        md = checkfield(md,'fieldname','masstransport.requested_outputs','stringrow',1)
+        return md
+    # }}}
+    def marshall(self,md,fid):    # {{{
+
+        yts=365.0*24.0*3600.0
+
+        WriteData(fid,'enum',SmbEnum(),'data',SMBgradientsEnum(),'format','Integer')
+        WriteData(fid,'object',self,'class','smb','fieldname','href','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1)
+        WriteData(fid,'object',self,'class','smb','fieldname','smbref','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1)
+        WriteData(fid,'object',self,'class','smb','fieldname','b_pos','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1)
+        WriteData(fid,'object',self,'class','smb','fieldname','b_neg','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1)
+
+        #process requested outputs
+        outputs = self.requested_outputs
+        indices = [i for i, x in enumerate(outputs) if x == 'default']
+        if len(indices) > 0:
+            outputscopy=outputs[0:indices[0]]+self.defaultoutputs(md)+outputs[indices[0]+1:]
+            outputs    =outputscopy
+        WriteData(fid,'data',outputs,'enum',SmbRequestedOutputsEnum(),'format','StringArray')

    # }}}
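The field descriptions above define the gradients method by two regression slopes around a reference elevation. A sketch of the implied update, choosing the slope by the sign of the reference SMB -- an assumption, since the kernel itself is not part of this hunk:

    # Sketch of the SMB gradients method suggested by the fields above
    # (regime selection by sign of smbref is an assumption).
    def smb_gradients(h, href, smbref, b_pos, b_neg):
        b = b_pos if smbref > 0 else b_neg    # accumulation vs ablation slope
        return smbref + b*(h - href)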
+
+		WriteData(fid,'enum',CalvingLawEnum(),'data',DefaultCalvingEnum(),'format','Integer');
+		WriteData(fid,'enum',LevelsetStabilizationEnum(),'data',self.stabilization,'format','Integer');
+		WriteData(fid,'enum',SpcLevelsetEnum(),'data',self.spclevelset,'format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1);
+		WriteData(fid,'object',self,'fieldname','calvingrate','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'scale',1./yts)
+		WriteData(fid,'object',self,'fieldname','meltingrate','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'scale',1./yts)
+	# }}}
Index: ../trunk-jpl/src/py3/classes/SMBcomponents.py
===================================================================
--- ../trunk-jpl/src/py3/classes/SMBcomponents.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/SMBcomponents.py	(revision 19895)
@@ -0,0 +1,96 @@
+import numpy
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from checkfield import *
+from project3d import *
+from WriteData import *
+
+class SMBcomponents(object):
+	"""
+	SMBcomponents Class definition
+
+	   Usage:
+	      SMBcomponents=SMBcomponents();
+	"""
+
+	def __init__(self): # {{{
+		self.accumulation      = float('NaN')
+		self.runoff            = float('NaN')
+		self.evaporation       = float('NaN')
+		self.requested_outputs = []
+		#}}}
+	def __repr__(self): # {{{
+		string="   surface forcings parameters (SMB=accumulation-runoff-evaporation) :"
+		string="%s\n%s"%(string,fielddisplay(self,'accumulation','accumulated snow [m/yr ice eq]'))
+		string="%s\n%s"%(string,fielddisplay(self,'runoff','amount of ice melt lost from the ice column [m/yr ice eq]'))
+		string="%s\n%s"%(string,fielddisplay(self,'evaporation','amount of ice lost to evaporative processes [m/yr ice eq]'))
+		string="%s\n%s"%(string,fielddisplay(self,'requested_outputs','additional outputs requested'))
+		return string
+		#}}}
+	def extrude(self,md): # {{{
+
+		self.accumulation=project3d(md,'vector',self.accumulation,'type','node')
+		self.runoff=project3d(md,'vector',self.runoff,'type','node')
+		self.evaporation=project3d(md,'vector',self.evaporation,'type','node')
+		return self
+		#}}}
+	def defaultoutputs(self,md): # {{{
+		return []
+		#}}}
+	def initialize(self,md): # {{{
+
+		if numpy.all(numpy.isnan(self.accumulation)):
+			self.accumulation=numpy.zeros((md.mesh.numberofvertices,1))
+			print("      no SMB.accumulation specified: values set as zero")
+
+		if numpy.all(numpy.isnan(self.runoff)):
+			self.runoff=numpy.zeros((md.mesh.numberofvertices,1))
+			print("      no SMB.runoff specified: values set as zero")
+
+		if numpy.all(numpy.isnan(self.evaporation)):
+			self.evaporation=numpy.zeros((md.mesh.numberofvertices,1))
+			print("      no SMB.evaporation specified: values set as zero")
+
+		return self
+		#}}}
+	def checkconsistency(self,md,solution,analyses):    # {{{
+
+		if MasstransportAnalysisEnum() in analyses:
+			md = checkfield(md,'fieldname','smb.accumulation','timeseries',1,'NaN',1)
+			md = checkfield(md,'fieldname','smb.runoff','timeseries',1,'NaN',1)
+			md = checkfield(md,'fieldname','smb.evaporation','timeseries',1,'NaN',1)
+
+		if BalancethicknessAnalysisEnum() in analyses:
+			md = checkfield(md,'fieldname','smb.accumulation','size',[md.mesh.numberofvertices],'NaN',1)
+			md = checkfield(md,'fieldname','smb.runoff','size',[md.mesh.numberofvertices],'NaN',1)
+			md = checkfield(md,'fieldname','smb.evaporation','size',[md.mesh.numberofvertices],'NaN',1)
+
+		md = checkfield(md,'fieldname','masstransport.requested_outputs','stringrow',1)
+
+		return md
+	# }}}
+	def marshall(self,md,fid):    # {{{
+
+		yts=365.0*24.0*3600.0
+
+		WriteData(fid,'enum',SmbEnum(),'data',SMBcomponentsEnum(),'format','Integer');
+		WriteData(fid,'object',self,'class','smb','fieldname','accumulation','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1)
+		WriteData(fid,'object',self,'class','smb','fieldname','runoff','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1)
+		WriteData(fid,'object',self,'class','smb','fieldname','evaporation','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1)
+
+		#process requested outputs
+		outputs = self.requested_outputs
+		indices = [i for i, x in enumerate(outputs) if x == 'default']
+		if len(indices) > 0:
+			outputscopy=outputs[0:indices[0]]+self.defaultoutputs(md)+outputs[indices[0]+1:]
+			outputs    =outputscopy
+		WriteData(fid,'data',outputs,'enum',SmbRequestedOutputsEnum(),'format','StringArray')
+
+	# }}}
Index: ../trunk-jpl/src/py3/classes/rifts.py
===================================================================
--- ../trunk-jpl/src/py3/classes/rifts.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/rifts.py	(revision 19895)
@@ -0,0 +1,84 @@
+import numpy
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from checkfield import checkfield
+from WriteData import WriteData
+from isnans import isnans
+import MatlabFuncs as m
+
+class rifts(object):
+	"""
+	RIFTS class definition
+
+	   Usage:
+	      rifts=rifts();
+	"""
+
+	def __init__(self): # {{{
+		self.riftstruct     = []
+		self.riftproperties = []
+
+		#set defaults
+		self.setdefaultparameters()
+
+		#}}}
+	def __repr__(self): # {{{
+		string='   rifts parameters:'
+
+		string="%s\n%s"%(string,fielddisplay(self,'riftstruct','structure containing all rift information (vertices coordinates, segments, type of melange, ...)'))
+		string="%s\n%s"%(string,fielddisplay(self,'riftproperties',''))
+		return string
+		#}}}
+	def setdefaultparameters(self): # {{{
+		return self
+		#}}}
+	def checkconsistency(self,md,solution,analyses):    # {{{
+		if (not self.riftstruct) or numpy.any(isnans(self.riftstruct)):
+			numrifts=0
+		else:
+			numrifts=len(self.riftstruct)
+
+		if numrifts:
+			if not m.strcmp(md.mesh.domaintype(),'2Dhorizontal'):
+				md.checkmessage("models with rifts are only supported in 2d for now!")
+			if not isinstance(self.riftstruct,list):
+				md.checkmessage("rifts.riftstruct should be a structure!")
+ md.checkmessage("model should be processed for rifts (run meshprocessrifts)!") + for i,rift in enumerate(self.riftstruct): + md = checkfield(md,'fieldname',"rifts.riftstruct[%d]['fill']" % i,'values',[WaterEnum(),AirEnum(),IceEnum(),MelangeEnum()]) + else: + if self.riftstruct and numpy.any(numpy.logical_not(isnans(self.riftstruct))): + md.checkmessage("riftstruct should be NaN since numrifts is 0!") + + return md + # }}} + def marshall(self,md,fid): # {{{ + + #Process rift info + if (not self.riftstruct) or numpy.any(isnans(self.riftstruct)): + numrifts=0 + else: + numrifts=len(self.riftstruct) + + numpairs=0 + for rift in self.riftstruct: + numpairs+=numpy.size(rift['penaltypairs'],axis=0) + + # 2 for nodes + 2 for elements+ 2 for normals + 1 for length + 1 for fill + 1 for friction + 1 for fraction + 1 for fractionincrement + 1 for state. + data=numpy.zeros((numpairs,12)) + count=0 + for rift in self.riftstruct: + numpairsforthisrift=numpy.size(rift['penaltypairs'],0) + data[count:count+numpairsforthisrift,0:7]=rift['penaltypairs'] + data[count:count+numpairsforthisrift,7]=rift['fill'] + data[count:count+numpairsforthisrift,8]=rift['friction'] + data[count:count+numpairsforthisrift,9]=rift['fraction'] + data[count:count+numpairsforthisrift,10]=rift['fractionincrement'] + data[count:count+numpairsforthisrift,11]=rift['state'].reshape(-1) + count+=numpairsforthisrift + + WriteData(fid,'data',numrifts,'enum',RiftsNumriftsEnum(),'format','Integer') + WriteData(fid,'data',data,'enum',RiftsRiftstructEnum(),'format','DoubleMat','mattype',3) + # }}} Index: ../trunk-jpl/src/py3/classes/flaim.py =================================================================== --- ../trunk-jpl/src/py3/classes/flaim.py (revision 0) +++ ../trunk-jpl/src/py3/classes/flaim.py (revision 19895) @@ -0,0 +1,72 @@ +import numpy +from collections import OrderedDict +from fielddisplay import fielddisplay +from EnumDefinitions import * +from checkfield import checkfield +from WriteData import WriteData + +class flaim(object): + """ + FLAIM class definition + + Usage: + flaim=flaim(); + """ + + def __init__(self): # {{{ + self.targets = '' + self.tracks = '' + self.flightreqs = OrderedDict() + self.criterion = float('NaN') + self.gridsatequator = 200000 + self.usevalueordering = True + self.split_antimeridian = True + self.solution = '' + self.quality = 0 + self.path_optimize = False + self.opt_ndir = 1 + self.opt_dist = 25 + self.opt_niter = 30000 + #}}} + def __repr__(self): # {{{ + string=' FLAIM - Flight Line Adaptation using Ice sheet Modeling:' + + string="%s\n\n%s"%(string,' Input:') + string="%s\n%s"%(string,fielddisplay(self,'targets' ,'name of kml output targets file ')) + string="%s\n%s"%(string,fielddisplay(self,'tracks' ,'name of kml input tracks file ')) + string="%s\n%s"%(string,fielddisplay(self,'flightreqs' ,'structure of kml flight requirements (not used yet)')) + string="%s\n%s"%(string,fielddisplay(self,'criterion' ,'element or nodal criterion for flight path evaluation (metric)')) + + string="%s\n\n%s"%(string,' Arguments:') + string="%s\n%s"%(string,fielddisplay(self,'gridsatequator' ,'number of grids at equator (determines resolution)')) + string="%s\n%s"%(string,fielddisplay(self,'usevalueordering' ,'flag to consider target values for flight path evaluation')) + string="%s\n%s"%(string,fielddisplay(self,'split_antimeridian' ,'flag to split polygons on the antimeridian')) + + string="%s\n\n%s"%(string,' Optimization:') + string="%s\n%s"%(string,fielddisplay(self,'path_optimize' ,'optimize? 
(default false)')) + string="%s\n%s"%(string,fielddisplay(self,'opt_ndir' ,['number of directions to test when moving a point. If this value = 1, a random direction is tested.',\ + 'A value > 1 results in directions equally spaced from [0, 2*PI] being tested.',\ + 'For example, 4 would result in directions [0, PI/2, PI, 3PI/2].'])) + string="%s\n%s"%(string,fielddisplay(self,'opt_dist' ,'specifies the distance in km (default 25) to move a randomly selected path point on each iteration')) + string="%s\n%s"%(string,fielddisplay(self,'opt_niter' ,['number of iterations (default 30,000) to run for flightplan optimization',\ + 'i.e. the number of times to randomly select a point and move it.'])) + + string="%s\n\n%s"%(string,' Output:') + string="%s\n%s"%(string,fielddisplay(self,'solution' ,'name of kml solution file')) + string="%s\n%s"%(string,fielddisplay(self,'quality' ,'quality of kml solution')) + return string + #}}} + def checkconsistency(self,md,solution,analyses): # {{{ + + #Early return + if not solution==FlaimSolutionEnum(): + return md + + md = checkfield(md,'fieldname','flaim.tracks','file',1) + if numpy.any(numpy.isnan(md.flaim.criterion)) or not md.flaim.criterion: + md = checkfield(md,'fieldname','flaim.targets','file',1) + else: + md = checkfield(md,'fieldname','flaim.criterion','numel',[md.mesh.numberofvertices,md.mesh.numberofelements]) + + return md + # }}} Index: ../trunk-jpl/src/py3/classes/stressbalance.py =================================================================== --- ../trunk-jpl/src/py3/classes/stressbalance.py (revision 0) +++ ../trunk-jpl/src/py3/classes/stressbalance.py (revision 19895) @@ -0,0 +1,208 @@ +import numpy +import sys +import copy +from project3d import project3d +from fielddisplay import fielddisplay +from EnumDefinitions import * +from checkfield import checkfield +from WriteData import WriteData +import MatlabFuncs as m + +class stressbalance(object): + """ + STRESSBALANCE class definition + + Usage: + stressbalance=stressbalance(); + """ + + def __init__(self): # {{{ + self.spcvx = float('NaN') + self.spcvy = float('NaN') + self.spcvz = float('NaN') + self.restol = 0 + self.reltol = 0 + self.abstol = 0 + self.isnewton = 0 + self.FSreconditioning = 0 + self.viscosity_overshoot = 0 + self.icefront = float('NaN') + self.maxiter = 0 + self.shelf_dampening = 0 + self.vertex_pairing = float('NaN') + self.penalty_factor = float('NaN') + self.rift_penalty_lock = float('NaN') + self.rift_penalty_threshold = 0 + self.referential = float('NaN') + self.loadingforce = float('NaN') + self.requested_outputs = [] + + #set defaults + self.setdefaultparameters() + + #}}} + def __repr__(self): # {{{ + + string=' StressBalance solution parameters:' + string="%s\n%s"%(string,' Convergence criteria:') + string="%s\n%s"%(string,fielddisplay(self,'restol','mechanical equilibrium residual convergence criterion')) + string="%s\n%s"%(string,fielddisplay(self,'reltol','velocity relative convergence criterion, NaN: not applied')) + string="%s\n%s"%(string,fielddisplay(self,'abstol','velocity absolute convergence criterion, NaN: not applied')) + string="%s\n%s"%(string,fielddisplay(self,'isnewton',"0: Picard's fixed point, 1: Newton's method, 2: hybrid")) + string="%s\n%s"%(string,fielddisplay(self,'maxiter','maximum number of nonlinear iterations')) + string="%s\n%s"%(string,fielddisplay(self,'viscosity_overshoot','over-shooting constant new=new+C*(new-old)')) + + string="%s\n%s"%(string,'\n boundary conditions:') + 
string="%s\n%s"%(string,fielddisplay(self,'spcvx','x-axis velocity constraint (NaN means no constraint) [m/yr]')) + string="%s\n%s"%(string,fielddisplay(self,'spcvy','y-axis velocity constraint (NaN means no constraint) [m/yr]')) + string="%s\n%s"%(string,fielddisplay(self,'spcvz','z-axis velocity constraint (NaN means no constraint) [m/yr]')) + string="%s\n%s"%(string,fielddisplay(self,'icefront','segments on ice front list (last column 0: Air, 1: Water, 2: Ice')) + + string="%s\n%s"%(string,'\n Rift options:') + string="%s\n%s"%(string,fielddisplay(self,'rift_penalty_threshold','threshold for instability of mechanical constraints')) + string="%s\n%s"%(string,fielddisplay(self,'rift_penalty_lock','number of iterations before rift penalties are locked')) + + string="%s\n%s"%(string,'\n Penalty options:') + string="%s\n%s"%(string,fielddisplay(self,'penalty_factor','offset used by penalties: penalty = Kmax*10^offset')) + string="%s\n%s"%(string,fielddisplay(self,'vertex_pairing','pairs of vertices that are penalized')) + + string="%s\n%s"%(string,'\n Other:') + string="%s\n%s"%(string,fielddisplay(self,'shelf_dampening','use dampening for floating ice ? Only for FS model')) + string="%s\n%s"%(string,fielddisplay(self,'FSreconditioning','multiplier for incompressibility equation. Only for FS model')) + string="%s\n%s"%(string,fielddisplay(self,'referential','local referential')) + string="%s\n%s"%(string,fielddisplay(self,'loadingforce','loading force applied on each point [N/m^3]')) + string="%s\n%s"%(string,fielddisplay(self,'requested_outputs','additional outputs requested')) + + return string + #}}} + def extrude(self,md): # {{{ + self.spcvx=project3d(md,'vector',self.spcvx,'type','node') + self.spcvy=project3d(md,'vector',self.spcvy,'type','node') + self.spcvz=project3d(md,'vector',self.spcvz,'type','node') + self.referential=project3d(md,'vector',self.referential,'type','node') + self.loadingforce=project3d(md,'vector',self.loadingforce,'type','node') + + return self + #}}} + def setdefaultparameters(self): # {{{ + #maximum of non-linear iterations. + self.maxiter=100 + + #Convergence criterion: absolute, relative and residual + self.restol=10**-4 + self.reltol=0.01 + self.abstol=10 + + self.FSreconditioning=10**13 + self.shelf_dampening=0 + + #Penalty factor applied kappa=max(stiffness matrix)*10^penalty_factor + self.penalty_factor=3 + + #coefficient to update the viscosity between each iteration of + #a stressbalance according to the following formula + #viscosity(n)=viscosity(n)+viscosity_overshoot(viscosity(n)-viscosity(n-1)) + self.viscosity_overshoot=0 + + #Stop the iterations of rift if below a threshold + self.rift_penalty_threshold=0 + + #in some solutions, it might be needed to stop a run when only + #a few constraints remain unstable. For thermal computation, this + #parameter is often used. 
+ self.rift_penalty_lock=10 + + #output default: + self.requested_outputs=['default'] + + return self + #}}} + def defaultoutputs(self,md): # {{{ + + if md.mesh.dimension()==3: + list = ['Vx','Vy','Vz','Vel','Pressure'] + else: + list = ['Vx','Vy','Vel','Pressure'] + return list + + #}}} + def checkconsistency(self,md,solution,analyses): # {{{ + + #Early return + if StressbalanceAnalysisEnum() not in analyses: + return md + + md = checkfield(md,'fieldname','stressbalance.spcvx','timeseries',1) + md = checkfield(md,'fieldname','stressbalance.spcvy','timeseries',1) + if m.strcmp(md.mesh.domaintype(),'3D'): + md = checkfield(md,'fieldname','stressbalance.spcvz','timeseries',1) + md = checkfield(md,'fieldname','stressbalance.restol','size',[1],'>',0) + md = checkfield(md,'fieldname','stressbalance.reltol','size',[1]) + md = checkfield(md,'fieldname','stressbalance.abstol','size',[1]) + md = checkfield(md,'fieldname','stressbalance.isnewton','numel',[1],'values',[0,1,2]) + md = checkfield(md,'fieldname','stressbalance.FSreconditioning','size',[1],'NaN',1) + md = checkfield(md,'fieldname','stressbalance.viscosity_overshoot','size',[1],'NaN',1) + md = checkfield(md,'fieldname','stressbalance.maxiter','size',[1],'>=',1) + md = checkfield(md,'fieldname','stressbalance.referential','size',[md.mesh.numberofvertices,6]) + md = checkfield(md,'fieldname','stressbalance.loadingforce','size',[md.mesh.numberofvertices,3]) + md = checkfield(md,'fieldname','stressbalance.requested_outputs','stringrow',1); + + #singular solution +# if ~any((~isnan(md.stressbalance.spcvx)+~isnan(md.stressbalance.spcvy))==2), + if not numpy.any(numpy.logical_and(numpy.logical_not(numpy.isnan(md.stressbalance.spcvx)),numpy.logical_not(numpy.isnan(md.stressbalance.spcvy)))): + print("\n !!! 
Warning: no spc applied, model might not be well posed if no basal friction is applied, check for solution crash\n") + #CHECK THAT EACH LINES CONTAINS ONLY NAN VALUES OR NO NAN VALUES +# if any(sum(isnan(md.stressbalance.referential),2)~=0 & sum(isnan(md.stressbalance.referential),2)~=6), + if numpy.any(numpy.logical_and(numpy.sum(numpy.isnan(md.stressbalance.referential),axis=1)!=0,numpy.sum(numpy.isnan(md.stressbalance.referential),axis=1)!=6)): + md.checkmessage("Each line of stressbalance.referential should contain either only NaN values or no NaN values") + #CHECK THAT THE TWO VECTORS PROVIDED ARE ORTHOGONAL +# if any(sum(isnan(md.stressbalance.referential),2)==0), + if numpy.any(numpy.sum(numpy.isnan(md.stressbalance.referential),axis=1)==0): + pos=[i for i,item in enumerate(numpy.sum(numpy.isnan(md.stressbalance.referential),axis=1)) if item==0] +# numpy.inner (and numpy.dot) calculate all the dot product permutations, resulting in a full matrix multiply +# if numpy.any(numpy.abs(numpy.inner(md.stressbalance.referential[pos,0:2],md.stressbalance.referential[pos,3:5]).diagonal())>sys.float_info.epsilon): +# md.checkmessage("Vectors in stressbalance.referential (columns 1 to 3 and 4 to 6) must be orthogonal") + for item in md.stressbalance.referential[pos,:]: + if numpy.abs(numpy.inner(item[0:2],item[3:5]))>sys.float_info.epsilon: + md.checkmessage("Vectors in stressbalance.referential (columns 1 to 3 and 4 to 6) must be orthogonal") + #CHECK THAT NO rotation specified for FS Grounded ice at base + if m.strcmp(md.mesh.domaintype(),'3D') and md.flowequation.isFS: + pos=numpy.nonzero(numpy.logical_and(md.mask.groundedice_levelset,md.mesh.vertexonbase)) + if numpy.any(numpy.logical_not(numpy.isnan(md.stressbalance.referential[pos,:]))): + md.checkmessage("no referential should be specified for basal vertices of grounded ice") + + return md + # }}} + def marshall(self,md,fid): # {{{ + + yts=365.0*24.0*3600.0 + + WriteData(fid,'object',self,'class','stressbalance','fieldname','spcvx','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1) + WriteData(fid,'object',self,'class','stressbalance','fieldname','spcvy','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1) + WriteData(fid,'object',self,'class','stressbalance','fieldname','spcvz','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1) + WriteData(fid,'object',self,'class','stressbalance','fieldname','restol','format','Double') + WriteData(fid,'object',self,'class','stressbalance','fieldname','reltol','format','Double') + WriteData(fid,'object',self,'class','stressbalance','fieldname','abstol','format','Double','scale',1./yts) + WriteData(fid,'object',self,'class','stressbalance','fieldname','isnewton','format','Integer') + WriteData(fid,'object',self,'class','stressbalance','fieldname','FSreconditioning','format','Double') + WriteData(fid,'object',self,'class','stressbalance','fieldname','viscosity_overshoot','format','Double') + WriteData(fid,'object',self,'class','stressbalance','fieldname','maxiter','format','Integer') + WriteData(fid,'object',self,'class','stressbalance','fieldname','shelf_dampening','format','Integer') + WriteData(fid,'object',self,'class','stressbalance','fieldname','vertex_pairing','format','DoubleMat','mattype',3) + WriteData(fid,'object',self,'class','stressbalance','fieldname','penalty_factor','format','Double') + 
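+		#Note: the rift penalty fields below are written unconditionally, even
+		#for models without rifts; the core is assumed to ignore them in that case.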
+		WriteData(fid,'object',self,'class','stressbalance','fieldname','rift_penalty_lock','format','Integer')
+		WriteData(fid,'object',self,'class','stressbalance','fieldname','rift_penalty_threshold','format','Integer')
+		WriteData(fid,'object',self,'class','stressbalance','fieldname','referential','format','DoubleMat','mattype',1)
+
+		WriteData(fid,'data',self.loadingforce[:,0],'format','DoubleMat','mattype',1,'enum',LoadingforceXEnum())
+		WriteData(fid,'data',self.loadingforce[:,1],'format','DoubleMat','mattype',1,'enum',LoadingforceYEnum())
+		WriteData(fid,'data',self.loadingforce[:,2],'format','DoubleMat','mattype',1,'enum',LoadingforceZEnum())
+
+		#process requested outputs
+		outputs = self.requested_outputs
+		indices = [i for i, x in enumerate(outputs) if x == 'default']
+		if len(indices) > 0:
+			outputscopy=outputs[0:indices[0]]+self.defaultoutputs(md)+outputs[indices[0]+1:]
+			outputs    =outputscopy
+		WriteData(fid,'data',outputs,'enum',StressbalanceRequestedOutputsEnum(),'format','StringArray')
+	# }}}
Index: ../trunk-jpl/src/py3/classes/plotoptions.py
===================================================================
--- ../trunk-jpl/src/py3/classes/plotoptions.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/plotoptions.py	(revision 19895)
@@ -0,0 +1,116 @@
+from collections import OrderedDict, Counter, defaultdict
+import pairoptions
+
+class plotoptions(object):
+	'''
+	PLOTOPTIONS class definition
+
+	   Usage:
+	      plotoptions=plotoptions(**kwargs)
+	'''
+
+	def __init__(self,**kwargs):# {{{
+		self.numberofplots = 0
+		self.figurenumber  = 1
+		self.list          = OrderedDict()
+
+		self.buildlist(**kwargs)
+		#}}}
+	def __repr__(self): #{{{
+		s="\n"
+		s+="   numberofplots: %i\n" % self.numberofplots
+		s+="   figurenumber:  %i\n" % self.figurenumber
+		if self.list:
+			s+="   list: (%ix%i)\n" % (len(self.list),2)
+			for item in list(self.list.items()):
+				#s+="      options of plot number %i\n" % item
+				if   isinstance(item[1],str):
+					s+="      field: %-10s value: '%s'\n" % (item[0],item[1])
+				elif isinstance(item[1],(bool,int,float)):
+					s+="      field: %-10s value: '%g'\n" % (item[0],item[1])
+				else:
+					s+="      field: %-10s value: '%s'\n" % (item[0],item[1])
+		else:
+			s+="   list: empty\n"
+		return s
+		#}}}
+	def buildlist(self,**kwargs): #{{{
+
+		#build a pairoptions object and a raw [key,value] list from the keyword arguments
+		rawoptions=pairoptions.pairoptions(**kwargs)
+		rawlist=[[name,value] for name,value in kwargs.items()]
+
+		#get figure number
+		self.figurenumber=rawoptions.getfieldvalue('figure',1)
+		rawoptions.removefield('figure',0)
+
+		#get number of subplots
+		numberofplots=Counter(x for sublist in rawlist for x in sublist if isinstance(x,str))['data']
+		self.numberofplots=numberofplots
+
+		#figure out whether alloptions flag is on
+		if rawoptions.getfieldvalue('alloptions','off')=='on':
+			allflag=1
+		else:
+			allflag=0
+
+		#initialize self.list (will need a list of dict's (or nested dict) for numberofplots>1)
+		#self.list=defaultdict(dict)
+		for i in range(numberofplots):
+			self.list[i]=pairoptions.pairoptions()
+
+		#process plot options
+		for i in range(len(rawlist)):
+
+			#if alloptions flag is on, apply to all plots
+			if (allflag and 'data' not in rawlist[i][0] and '#' not in rawlist[i][0]):
+
+				for j in range(numberofplots):
+					self.list[j].addfield(rawlist[i][0],rawlist[i][1])
+
+			elif '#' in rawlist[i][0]:
+
+				#get subplots associated
+				string=rawlist[i][0].split('#')
+				plotnums=string[-1].split(',')
+				field=string[0]
+
+				#loop over plotnums
+				for k in range(len(plotnums)):
+					plotnum=plotnums[k]
+
+					#Empty
+					if not plotnum: continue
+
+					# '#all'
+					elif 'all' in plotnum:
+						for j in range(numberofplots):
+							self.list[j].addfield(field,rawlist[i][1])
+
+					# '#i-j'
+					elif '-' in plotnum:
+						nums=plotnum.split('-')
+						if len(nums)!=2: continue
+						if False in [x.isdigit() for x in nums]:
+							raise ValueError('error: in option i-j both i and j must be integers')
+						for j in range(int(nums[0])-1,int(nums[1])):
+							self.list[j].addfield(field,rawlist[i][1])
+
+					# Deal with #i
+					else:
+						#assign to subplot
+						if int(plotnum)>numberofplots:
+							raise ValueError('error: %s cannot be assigned %d which exceeds the number of subplots' % (field,int(plotnum)))
+						self.list[int(plotnum)-1].addfield(field,rawlist[i][1])
+			else:
+
+				#go through all subplots and assign key-value pairs
+				j=0
+				while j <= numberofplots-1:
+					if not self.list[j].exist(rawlist[i][0]):
+						self.list[j].addfield(rawlist[i][0],rawlist[i][1])
+						break
+					else:
+						j=j+1
+				if j+1>numberofplots:
+					print("WARNING: too many instances of '%s' in options" % rawlist[i][0])
+		#}}}
Index: ../trunk-jpl/src/py3/classes/steadystate.py
===================================================================
--- ../trunk-jpl/src/py3/classes/steadystate.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/steadystate.py	(revision 19895)
@@ -0,0 +1,75 @@
+import numpy
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from checkfield import checkfield
+from WriteData import WriteData
+
+class steadystate(object):
+	"""
+	STEADYSTATE class definition
+
+	   Usage:
+	      steadystate=steadystate();
+	"""
+
+	def __init__(self): # {{{
+		self.reltol            = 0
+		self.maxiter           = 0
+		self.requested_outputs = []
+
+		#set defaults
+		self.setdefaultparameters()
+
+		#}}}
+	def __repr__(self): # {{{
+		string='   steadystate solution parameters:'
+		string="%s\n%s"%(string,fielddisplay(self,'reltol','relative tolerance criterion'))
+		string="%s\n%s"%(string,fielddisplay(self,'maxiter','maximum number of iterations'))
+		string="%s\n%s"%(string,fielddisplay(self,'requested_outputs','additional requested outputs'))
+		return string
+		#}}}
+	def defaultoutputs(self,md): # {{{
+
+		return md.stressbalance.defaultoutputs(md)+md.thermal.defaultoutputs(md)
+
+		#}}}
+	def setdefaultparameters(self): # {{{
+
+		#maximum of steady state iterations
+		self.maxiter=100
+
+		#Relative tolerance for the steadystate convergence
+		self.reltol=0.01
+
+		#default output
+		self.requested_outputs=['default']
+		return self
+		#}}}
+	def checkconsistency(self,md,solution,analyses):    # {{{
+
+		#Early return
+		if not solution==SteadystateSolutionEnum():
+			return md
+
+		if not md.timestepping.time_step==0:
+			md.checkmessage("for a steadystate computation, timestepping.time_step must be zero.")
+
+		if numpy.isnan(md.stressbalance.reltol):
+			md.checkmessage("for a steadystate computation, stressbalance.reltol (relative convergence criterion) must be defined!")
+
+		md = checkfield(md,'fieldname','steadystate.requested_outputs','stringrow',1)
+
+		return md
+	# }}}
+	def marshall(self,md,fid):    # {{{
+		WriteData(fid,'object',self,'fieldname','reltol','format','Double')
+		WriteData(fid,'object',self,'fieldname','maxiter','format','Integer')
+
+		#process requested outputs
+		outputs = self.requested_outputs
+		indices = [i for i, x in enumerate(outputs) if x == 'default']
+		if len(indices) > 0:
+			outputscopy=outputs[0:indices[0]]+self.defaultoutputs(md)+outputs[indices[0]+1:]
+			outputs    =outputscopy
+		WriteData(fid,'data',outputs,'enum',SteadystateRequestedOutputsEnum(),'format','StringArray')
+	# }}}
Index: ../trunk-jpl/src/py3/classes/geometry.py
===================================================================
--- ../trunk-jpl/src/py3/classes/geometry.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/geometry.py	(revision 19895)
@@ -0,0 +1,64 @@
+from project3d import project3d
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from checkfield import checkfield
+from WriteData import WriteData
+
+class geometry(object):
+	"""
+	GEOMETRY class definition
+
+	   Usage:
+	      geometry=geometry();
+	"""
+
+	def __init__(self): # {{{
+		self.surface           = float('NaN')
+		self.thickness         = float('NaN')
+		self.base              = float('NaN')
+		self.bed               = float('NaN')
+		self.hydrostatic_ratio = float('NaN')
+
+		#set defaults
+		self.setdefaultparameters()
+
+		#}}}
+	def __repr__(self): # {{{
+
+		string="   geometry parameters:"
+		string="%s\n%s"%(string,fielddisplay(self,'surface','ice upper surface elevation [m]'))
+		string="%s\n%s"%(string,fielddisplay(self,'thickness','ice thickness [m]'))
+		string="%s\n%s"%(string,fielddisplay(self,'base','ice base elevation [m]'))
+		string="%s\n%s"%(string,fielddisplay(self,'bed','bed elevation [m]'))
+		return string
+		#}}}
+	def extrude(self,md): # {{{
+		self.surface=project3d(md,'vector',self.surface,'type','node')
+		self.thickness=project3d(md,'vector',self.thickness,'type','node')
+		self.hydrostatic_ratio=project3d(md,'vector',self.hydrostatic_ratio,'type','node')
+		self.base=project3d(md,'vector',self.base,'type','node')
+		self.bed=project3d(md,'vector',self.bed,'type','node')
+		return self
+		#}}}
+	def setdefaultparameters(self): # {{{
+		return self
+		#}}}
+	def checkconsistency(self,md,solution,analyses):    # {{{
+
+		md = checkfield(md,'fieldname','geometry.surface'  ,'NaN',1,'size',[md.mesh.numberofvertices])
+		md = checkfield(md,'fieldname','geometry.base'     ,'NaN',1,'size',[md.mesh.numberofvertices])
+		md = checkfield(md,'fieldname','geometry.thickness','NaN',1,'size',[md.mesh.numberofvertices],'>',0,'timeseries',1)
+		if any(abs(self.thickness-self.surface+self.base)>10**-9):
+			md.checkmessage("equality thickness=surface-base violated")
+		if solution==TransientSolutionEnum() and md.transient.isgroundingline:
+			md = checkfield(md,'fieldname','geometry.bed','NaN',1,'size',[md.mesh.numberofvertices])
+
+		return md
+	# }}}
+	def marshall(self,md,fid):    # {{{
+		WriteData(fid,'data',self.surface,'format','DoubleMat','mattype',1,'enum',SurfaceEnum())
+		WriteData(fid,'data',self.thickness,'format','DoubleMat','mattype',1,'enum',ThicknessEnum(),'timeserieslength',md.mesh.numberofvertices+1)
+		WriteData(fid,'data',self.base,'format','DoubleMat','mattype',1,'enum',BaseEnum())
+		WriteData(fid,'data',self.bed,'format','DoubleMat','mattype',1,'enum',BedEnum())
+		WriteData(fid,'object',self,'fieldname','hydrostatic_ratio','format','DoubleMat','mattype',1)
+	# }}}
Index: ../trunk-jpl/src/py3/classes/mismipbasalforcings.py
===================================================================
--- ../trunk-jpl/src/py3/classes/mismipbasalforcings.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/mismipbasalforcings.py	(revision 19895)
@@ -0,0 +1,98 @@
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from checkfield import checkfield
+from WriteData import WriteData
+from project3d import project3d
+import numpy
+
+class mismipbasalforcings(object):
+	"""
+	MISMIP Basal Forcings class definition
+
+	   Usage:
+	      mismipbasalforcings=mismipbasalforcings()
+	"""
+
+	def __init__(self,md): # {{{
+
+		self.groundedice_melting_rate = float('NaN')
+		self.meltrate_factor          = float('NaN')
+		self.threshold_thickness      = float('NaN')
+		self.upperdepth_melt          = float('NaN')
+		self.geothermalflux           = float('NaN')
+
+		if numpy.all(numpy.isnan(self.groundedice_melting_rate)):
+			self.groundedice_melting_rate=numpy.zeros(md.mesh.numberofvertices)
+			print('      no basalforcings.groundedice_melting_rate specified: values set as zero')
+
+		self.setdefaultparameters()
+
+		#}}}
+	def __repr__(self): # {{{
+		string="   MISMIP+ basal melt parameterization\n"
+		string="%s\n%s"%(string,fielddisplay(self,"groundedice_melting_rate","basal melting rate (positive if melting) [m/yr]"))
+		string="%s\n%s"%(string,fielddisplay(self,"meltrate_factor","Melt-rate rate factor [1/yr] (sign is opposite to MISMIP+ benchmark to remain consistent with ISSM convention of positive values for melting)"))
+		string="%s\n%s"%(string,fielddisplay(self,"threshold_thickness","Threshold thickness for saturation of basal melting [m]"))
+		string="%s\n%s"%(string,fielddisplay(self,"upperdepth_melt","Depth above which melt rate is zero [m]"))
+		string="%s\n%s"%(string,fielddisplay(self,"geothermalflux","Geothermal heat flux [W/m^2]"))
+
+		return string
+		#}}}
+	def extrude(self,md): # {{{
+		self.groundedice_melting_rate=project3d(md,'vector',self.groundedice_melting_rate,'type','node','layer',1)
+		self.geothermalflux=project3d(md,'vector',self.geothermalflux,'type','node','layer',1)
+		return self
+		#}}}
+	def setdefaultparameters(self): # {{{
+
+		# default values for melting parameterization
+		self.meltrate_factor     = 0.2
+		self.threshold_thickness = 75.
+		self.upperdepth_melt     = -100.
+
+		return self
+		#}}}
+	def checkconsistency(self,md,solution,analyses):    # {{{
+
+		if MasstransportAnalysisEnum() in analyses and not (solution==TransientSolutionEnum() and md.transient.ismasstransport==0):
+			md = checkfield(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'timeseries',1)
+			md = checkfield(md,'fieldname','basalforcings.meltrate_factor','>=',0,'numel',[1])
+			md = checkfield(md,'fieldname','basalforcings.threshold_thickness','>=',0,'numel',[1])
+			md = checkfield(md,'fieldname','basalforcings.upperdepth_melt','<=',0,'numel',[1])
+
+		if BalancethicknessAnalysisEnum() in analyses:
+			md = checkfield(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'size',[md.mesh.numberofvertices])
+			md = checkfield(md,'fieldname','basalforcings.meltrate_factor','>=',0,'numel',[1])
+			md = checkfield(md,'fieldname','basalforcings.threshold_thickness','>=',0,'numel',[1])
+			md = checkfield(md,'fieldname','basalforcings.upperdepth_melt','<=',0,'numel',[1])
+
+		if ThermalAnalysisEnum() in analyses and not (solution==TransientSolutionEnum() and md.transient.isthermal==0):
+			md = checkfield(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'timeseries',1)
+			md = checkfield(md,'fieldname','basalforcings.meltrate_factor','>=',0,'numel',[1])
+			md = checkfield(md,'fieldname','basalforcings.threshold_thickness','>=',0,'numel',[1])
+			md = checkfield(md,'fieldname','basalforcings.upperdepth_melt','<=',0,'numel',[1])
+			md = checkfield(md,'fieldname','basalforcings.geothermalflux','NaN',1,'timeseries',1,'>=',0)
+
+		return md
+	# }}}
+	def marshall(self,md,fid):    # {{{
+
+		yts=md.constants.yts
+		if yts!=365.2422*24.*3600.:
+			print('WARNING: value of yts for MISMIP+ runs different from ISSM default!')
+
+		floatingice_melting_rate = numpy.zeros((md.mesh.numberofvertices,1))
+		floatingice_melting_rate = md.basalforcings.meltrate_factor*numpy.tanh((md.geometry.base-md.geometry.bed)/md.basalforcings.threshold_thickness)*numpy.maximum(md.basalforcings.upperdepth_melt-md.geometry.base,0.)
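+		#Note: as read from the line above, the parameterization is
+		#   melt = meltrate_factor * tanh((base-bed)/threshold_thickness) * max(upperdepth_melt-base, 0)
+		#so melt vanishes where the ice is grounded (base==bed) and where the
+		#base is shallower than upperdepth_melt; positive values mean melting,
+		#per the ISSM sign convention noted in __repr__.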
+
+		WriteData(fid,'enum',BasalforcingsEnum(),'data',MismipFloatingMeltRateEnum(),'format','Integer')
+		WriteData(fid,'data',floatingice_melting_rate,'format','DoubleMat','enum',BasalforcingsFloatingiceMeltingRateEnum(),'mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1)
+		WriteData(fid,'object',self,'fieldname','groundedice_melting_rate','format','DoubleMat','enum',BasalforcingsGroundediceMeltingRateEnum(),'mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1)
+		WriteData(fid,'object',self,'fieldname','geothermalflux','enum',BasalforcingsGeothermalfluxEnum(),'format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1)
+		WriteData(fid,'object',self,'fieldname','meltrate_factor','format','Double','enum',BasalforcingsMeltrateFactorEnum(),'scale',1./yts)
+		WriteData(fid,'object',self,'fieldname','threshold_thickness','format','Double','enum',BasalforcingsThresholdThicknessEnum())
+		WriteData(fid,'object',self,'fieldname','upperdepth_melt','format','Double','enum',BasalforcingsUpperdepthMeltEnum())
+
+	# }}}
Index: ../trunk-jpl/src/py3/classes/independent.py
===================================================================
--- ../trunk-jpl/src/py3/classes/independent.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/independent.py	(revision 19895)
@@ -0,0 +1,70 @@
+import numpy
+from pairoptions import pairoptions
+from fielddisplay import fielddisplay
+from checkfield import checkfield
+import MatlabFuncs as m
+from EnumDefinitions import *
+
+class independent(object):
+	"""
+	INDEPENDENT class definition
+
+	   Usage:
+	      independent=independent();
+	"""
+
+	def __init__(self,**kwargs): # {{{
+		self.name                = ''
+		self.type                = ''
+		self.fos_forward_index   = float('NaN')
+		self.fov_forward_indices = numpy.array([])
+		self.nods                = 0
+
+		#set defaults
+		self.setdefaultparameters()
+
+		#use provided options to change fields
+		options=pairoptions(**kwargs)
+
+		#OK get other fields
+		self=options.AssignObjectFields(self)
+	# }}}
+	def __repr__(self): # {{{
+		s ="   independent variable:\n"
+
+		s+="%s\n" % fielddisplay(self,'name',"variable name (must match corresponding Enum)")
+		s+="%s\n" % fielddisplay(self,'type',"type of variable ('vertex' or 'scalar')")
+		if not numpy.isnan(self.fos_forward_index):
+			s+="%s\n" % fielddisplay(self,'fos_forward_index',"index for fos_forward driver of ADOLC")
+		if numpy.any(numpy.logical_not(numpy.isnan(self.fov_forward_indices))):
+			s+="%s\n" % fielddisplay(self,'fov_forward_indices',"indices for fov_forward driver of ADOLC")
+
+		return s
+	# }}}
+	def setdefaultparameters(self): # {{{
+		#do nothing
+		return self
+	# }}}
+	def checkconsistency(self,md,i,solution,analyses,driver): # {{{
+		if not numpy.isnan(self.fos_forward_index):
+			if not m.strcmpi(driver,'fos_forward'):
+				raise TypeError("cannot declare an independent with a fos_forward_index when the driver is not fos_forward!")
+			if self.nods==0:
+				raise TypeError("independent checkconsistency error: nods should be set to the size of the independent variable")
+
+		if numpy.size(self.fov_forward_indices):
+			if not m.strcmpi(driver,'fov_forward'):
+				raise TypeError("cannot declare an independent with fov_forward_indices when the driver is not fov_forward!")
+			if self.nods==0:
+				raise TypeError("independent checkconsistency error: nods should be set to the size of the independent variable")
+			md = checkfield(md,'fieldname',"autodiff.independents[%d].fov_forward_indices" % i,'>=',1,'<=',self.nods,'size',[float('NaN'),1])
+
+		return md
+	# }}}
+	def typetoscalar(self): # {{{
+		if m.strcmpi(self.type,'scalar'):
+			scalar=0
+		elif m.strcmpi(self.type,'vertex'):
+			scalar=1
+
+		return scalar
+	# }}}
Index: ../trunk-jpl/src/py3/classes/hydrologyshreve.py
===================================================================
--- ../trunk-jpl/src/py3/classes/hydrologyshreve.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/hydrologyshreve.py	(revision 19895)
@@ -0,0 +1,54 @@
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from checkfield import checkfield
+from WriteData import WriteData
+
+class hydrologyshreve(object):
+	"""
+	HYDROLOGYSHREVE class definition
+
+	   Usage:
+	      hydrologyshreve=hydrologyshreve();
+	"""
+
+	def __init__(self): # {{{
+		self.spcwatercolumn = float('NaN')
+		self.stabilization  = 0
+
+		#set defaults
+		self.setdefaultparameters()
+
+		#}}}
+	def __repr__(self): # {{{
+
+		string='   hydrologyshreve solution parameters:'
+		string="%s\n%s"%(string,fielddisplay(self,'spcwatercolumn','water thickness constraints (NaN means no constraint) [m]'))
+		string="%s\n%s"%(string,fielddisplay(self,'stabilization','artificial diffusivity (default is 1). can be more than 1 to increase diffusivity.'))
+		return string
+		#}}}
+	def extrude(self,md): # {{{
+		return self
+		#}}}
+	def setdefaultparameters(self): # {{{
+
+		#Type of stabilization to use 0:nothing 1:artificial_diffusivity
+		self.stabilization=1
+
+		return self
+		#}}}
+	def checkconsistency(self,md,solution,analyses):    # {{{
+
+		#Early return
+		if HydrologyShreveAnalysisEnum() not in analyses:
+			return md
+
+		md = checkfield(md,'fieldname','hydrology.spcwatercolumn','timeseries',1)
+		md = checkfield(md,'fieldname','hydrology.stabilization','>=',0)
+
+		return md
+	# }}}
+	def marshall(self,md,fid):    # {{{
+		WriteData(fid,'enum',HydrologyModelEnum(),'data',HydrologyshreveEnum(),'format','Integer');
+		WriteData(fid,'object',self,'fieldname','spcwatercolumn','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1)
+		WriteData(fid,'object',self,'fieldname','stabilization','format','Double')
+	# }}}
Index: ../trunk-jpl/src/py3/classes/organizer.py
===================================================================
--- ../trunk-jpl/src/py3/classes/organizer.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/organizer.py	(revision 19895)
@@ -0,0 +1,179 @@
+import os.path
+from collections import OrderedDict
+import pairoptions
+from loadvars import loadvars
+from loadmodel import loadmodel
+from savevars import savevars
+from model import model
+from dbm.ndbm import whichdb
+import MatlabFuncs as m
+
+class organizer(object):
+	"""
+	ORGANIZER class definition
+
+	   Supported options:
+	      repository:  directory where all models will be saved
+	      prefix:      prefix for saved model names
+	      steps:       requested steps
+	      trunkprefix: prefix of previous run with a different prefix. Used to branch.
+
+	   Usage:
+	      org = organizer(**kwargs)
+
+	   Examples:
+	      org = organizer(repository='Models/',prefix='AGU2015',steps=0)   #build an empty organizer object with a given repository
+	"""
+
+	def __init__(self,**kwargs): # {{{
+		self._currentstep  =0
+		self.repository    ='./'
+		self.prefix        ='model.'
+		self.trunkprefix   =''
+		self.steps         =[]
+		self.requestedsteps=[0]
+
+		#process options
+		options=pairoptions.pairoptions(**kwargs)
+
+		#Get prefix
+		prefix=options.getfieldvalue('prefix','model.')
+		if not isinstance(prefix,str):
+			raise TypeError("prefix is not a string")
+		if not m.strcmp(prefix,prefix.strip()) or len(prefix.split()) > 1:
+			raise TypeError("prefix should not have any white space")
+		self.prefix=prefix
+
+		#Get repository
+		repository=options.getfieldvalue('repository','./')
+		if not isinstance(repository,str):
+			raise TypeError("repository is not a string")
+		if not os.path.isdir(repository):
+			raise IOError("Directory '%s' not found" % repository)
+		self.repository=repository
+
+		#Get steps
+		self.requestedsteps=options.getfieldvalue('steps',[0])
+
+		#Get trunk prefix (only if provided by user)
+		if options.exist('trunkprefix'):
+			trunkprefix=options.getfieldvalue('trunkprefix','')
+			if not isinstance(trunkprefix,str):
+				raise TypeError("trunkprefix is not a string")
+			if not m.strcmp(trunkprefix,trunkprefix.strip()) or len(trunkprefix.split()) > 1:
+				raise TypeError("trunkprefix should not have any white space")
+			self.trunkprefix=trunkprefix
+		#}}}
+	def __repr__(self): # {{{
+		s =""
+
+		s+="%s\n" % "   Repository: '%s'" % self.repository
+		s+="%s\n" % "   Prefix:     '%s'" % self.prefix
+		if not self.steps:
+			s+="%s\n" % "   no step"
+		else:
+			for step in self.steps:
+				s+="   step #%2i: '%s'\n" % (step['id'],step['string'])
+		return s
+		#}}}
+	def load(self,string): # {{{
+
+		#Get model path
+		if not isinstance(string,str):
+			raise TypeError("argument provided is not a string")
+		path=os.path.join(self.repository,self.prefix+string)
+
+		#figure out if the model is there
+		if os.path.exists(path):
+			struc=loadvars(path)
+			name=[key for key in struc.keys()]
+			md=struc[name[0]]
+		else:
+			raise IOError("Could not find '%s'" % path)
+
+		return md
+		#}}}
+	def loadmodel(self,string): # {{{
+
+		#Get model path
+		if not isinstance(string,str):
+			raise TypeError("argument provided is not a string")
+		path1=os.path.join(self.repository,self.prefix+'.step#'+string+'.python')
+		path2=os.path.join(self.repository,string)
+
+		#figure out if the model is there, otherwise, we have to use the default path supplied by user.
+		if whichdb(path1):
+			md=loadmodel(path1)
+			return md
+		elif whichdb(path2):
+			md=loadmodel(path2)
+			return md
+
+		#If we are here, the model has not been found. Try trunk prefix if provided
+		if self.trunkprefix:
+			path2=os.path.join(self.repository,self.trunkprefix+string)
+			if not os.path.exists(path2):
+				raise IOError("Could find neither '%s' nor '%s'" % (path1,path2))
+			else:
+				print("--> Branching '%s' from trunk '%s'" % (self.prefix,self.trunkprefix))
+				md=loadmodel(path2)
+				return md
+		else:
+			raise IOError("Could not find '%s'" % path1)
+		#}}}
+	def perform(self,string): # {{{
+
+		bool=False
+
+		#Some checks
+		if not isinstance(string,str):
+			raise TypeError("Step provided should be a string")
+		if not m.strcmp(string,string.strip()) or len(string.split()) > 1:
+			raise TypeError("Step provided should not have any white space")
+		if self._currentstep>0 and string in [step['string'] for step in self.steps]:
+			raise RuntimeError("Step '%s' already present. Change name" % string)
+
+		#Add step
+		self.steps.append(OrderedDict())
+		self.steps[-1]['id']=len(self.steps)
+		self.steps[-1]['string']=string
+		self._currentstep+=1
+
+		#if requestedsteps = 0, print all steps in self
+		if 0 in self.requestedsteps:
+			if self._currentstep==1:
+				print("   prefix: %s" % self.prefix)
+			print("   step #%i : %s" % (self.steps[self._currentstep-1]['id'],self.steps[self._currentstep-1]['string']))
+
+		#Ok, now if _currentstep is a member of steps, return true
+		if self._currentstep in self.requestedsteps:
+			print("\n   step #%i : %s\n" % (self.steps[self._currentstep-1]['id'],self.steps[self._currentstep-1]['string']))
+			bool=True
+
+		#assign self back to calling workspace
+		#(no need, since Python modifies class instance directly)
+
+		return bool
+		#}}}
+	def savemodel(self,md, name='default'): # {{{
+
+		#check
+		if self._currentstep==0:
+			raise RuntimeError("Cannot save model because organizer (org) is empty! Make sure you did not skip any perform call")
+		if self._currentstep>len(self.steps):
+			raise RuntimeError("Cannot save model because organizer (org) is not up to date!")
+
+		if (name=='default'):
+			name=os.path.join(self.repository,self.prefix+'step#'+self.steps[self._currentstep-1]['string']+'.python')
+		else:
+			name=os.path.join(self.repository,name)
+		print("saving model as: '%s'" % name)
+
+		#check that md is a model
+		if not isinstance(md,model):
+			raise TypeError("second argument is not a model")
+		if self._currentstep>len(self.steps):
+			raise RuntimeError("organizer error message: element with id %d not found" % self._currentstep)
+
+		#save model
+		savevars(name,'md',md)
+		#}}}
Index: ../trunk-jpl/src/py3/classes/results.py
===================================================================
--- ../trunk-jpl/src/py3/classes/results.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/results.py	(revision 19895)
@@ -0,0 +1,52 @@
+import numpy
+from fielddisplay import fielddisplay
+import MatlabFuncs as m
+from EnumDefinitions import *
+
+class results(object):
+	"""
+	RESULTS class definition
+
+	   Usage:
+	      results=results();
+	"""
+
+	def __init__(self): # {{{
+		pass
+	# }}}
+	def __repr__(self): # {{{
+		s ="   Model results:\n"
+
+		if 'step' in self.__dict__:
+			s+="%s\n" % fielddisplay(self,'step',"step number")
+		if 'time' in self.__dict__:
+			s+="%s\n" % fielddisplay(self,'time',"time value")
+		if 'SolutionType' in self.__dict__:
+			s+="%s\n" % fielddisplay(self,'SolutionType',"solution type")
+
+		for name in list(self.__dict__.keys()):
+			if name not in ['step','time','SolutionType','errlog','outlog']:
+				if   isinstance(getattr(self,name),list):
+					s+="%s\n" % fielddisplay(self,name,"model results list")
+				elif isinstance(getattr(self,name),results):
+					s+="%s\n" % fielddisplay(self,name,"model results case")
+				else:
+					s+="%s\n" % fielddisplay(self,name,"")
+
+		if 'errlog' in self.__dict__:
+			s+="%s\n" % fielddisplay(self,'errlog',"error log file")
+		if 'outlog' in self.__dict__:
+			s+="%s\n" % fielddisplay(self,'outlog',"output log file")
+
+		return s
+	# }}}
+	def setdefaultparameters(self): # {{{
+		#do nothing
+		return self
+	# }}}
+	def checkconsistency(self,md,solution,analyses): # {{{
+		return md
+	# }}}
+	def marshall(self,md,fid): # {{{
+		pass
+	# }}}
Index: ../trunk-jpl/src/py3/classes/matice.py
===================================================================
--- ../trunk-jpl/src/py3/classes/matice.py	(revision 0)
+++ ../trunk-jpl/src/py3/classes/matice.py	(revision 19895)
@@ -0,0 +1,157 @@
+from fielddisplay import fielddisplay
+from project3d import project3d
+from EnumDefinitions import *
+from StringToEnum import StringToEnum
+from checkfield import checkfield
+from WriteData import WriteData
+
+class matice(object):
+	"""
+	MATICE class definition
+
+	   Usage:
+	      matice=matice();
+	"""
+
+	def __init__(self): # {{{
+		self.rho_ice                   = 0.
+		self.rho_water                 = 0.
+		self.rho_freshwater            = 0.
+		self.mu_water                  = 0.
+		self.heatcapacity              = 0.
+		self.latentheat                = 0.
+		self.thermalconductivity       = 0.
+		self.temperateiceconductivity  = 0.
+		self.meltingpoint              = 0.
+		self.beta                      = 0.
+		self.mixed_layer_capacity      = 0.
+		self.thermal_exchange_velocity = 0.
+		self.rheology_B                = float('NaN')
+		self.rheology_n                = float('NaN')
+		self.rheology_law              = ''
+
+		#gia:
+		self.lithosphere_shear_modulus = 0.
+		self.lithosphere_density       = 0.
+		self.mantle_shear_modulus      = 0.
+		self.mantle_density            = 0.
+
+		self.setdefaultparameters()
+		#}}}
+	def __repr__(self): # {{{
+		string="   Materials:"
+
+		string="%s\n%s"%(string,fielddisplay(self,"rho_ice","ice density [kg/m^3]"))
+		string="%s\n%s"%(string,fielddisplay(self,"rho_water","water density [kg/m^3]"))
+		string="%s\n%s"%(string,fielddisplay(self,"rho_freshwater","fresh water density [kg/m^3]"))
+		string="%s\n%s"%(string,fielddisplay(self,"mu_water","water viscosity [N s/m^2]"))
+		string="%s\n%s"%(string,fielddisplay(self,"heatcapacity","heat capacity [J/kg/K]"))
+		string="%s\n%s"%(string,fielddisplay(self,"thermalconductivity","ice thermal conductivity [W/m/K]"))
+		string="%s\n%s"%(string,fielddisplay(self,"temperateiceconductivity","temperate ice thermal conductivity [W/m/K]"))
+		string="%s\n%s"%(string,fielddisplay(self,"meltingpoint","melting point of ice at 1atm in K"))
+		string="%s\n%s"%(string,fielddisplay(self,"latentheat","latent heat of fusion [J/kg]"))
+		string="%s\n%s"%(string,fielddisplay(self,"beta","rate of change of melting point with pressure [K/Pa]"))
+		string="%s\n%s"%(string,fielddisplay(self,"mixed_layer_capacity","mixed layer capacity [W/kg/K]"))
+		string="%s\n%s"%(string,fielddisplay(self,"thermal_exchange_velocity","thermal exchange velocity [m/s]"))
+		string="%s\n%s"%(string,fielddisplay(self,"rheology_B","flow law parameter [Pa/s^(1/n)]"))
+		string="%s\n%s"%(string,fielddisplay(self,"rheology_n","Glen's flow law exponent"))
+		string="%s\n%s"%(string,fielddisplay(self,"rheology_law","law for the temperature dependence of the rheology: 'None', 'Cuffey', 'Paterson', 'Arrhenius' or 'LliboutryDuval'"))
+		string="%s\n%s"%(string,fielddisplay(self,"lithosphere_shear_modulus","Lithosphere shear modulus [Pa]"))
+		string="%s\n%s"%(string,fielddisplay(self,"lithosphere_density","Lithosphere density [g/cm^3]"))
+		string="%s\n%s"%(string,fielddisplay(self,"mantle_shear_modulus","Mantle shear modulus [Pa]"))
+		string="%s\n%s"%(string,fielddisplay(self,"mantle_density","Mantle density [g/cm^3]"))
+
+		return string
+		#}}}
+	def extrude(self,md): # {{{
+		self.rheology_B=project3d(md,'vector',self.rheology_B,'type','node')
+		self.rheology_n=project3d(md,'vector',self.rheology_n,'type','element')
+		return self
+		#}}}
+	def setdefaultparameters(self): # {{{
+		#ice density (kg/m^3)
+		self.rho_ice=917.
+
+		#ocean water density (kg/m^3)
+		self.rho_water=1023.
+
+		#fresh water density (kg/m^3)
+		self.rho_freshwater=1000.
+
+		#water viscosity (N.s/m^2)
+		self.mu_water=0.001787
+
+		#ice heat capacity cp (J/kg/K)
+		self.heatcapacity=2093.
+
+		#ice latent heat of fusion L (J/kg)
+		self.latentheat=3.34*10**5
+
+		#ice thermal conductivity (W/m/K)
+		self.thermalconductivity=2.4
+
+		#temperate ice thermal conductivity (W/m/K)
+		self.temperateiceconductivity=0.24
+
+		#the melting point of ice at 1 atmosphere of pressure in K
+		self.meltingpoint=273.15
+
+		#rate of change of melting point with pressure (K/Pa)
+		self.beta=9.8*10**-8
+
+		#mixed layer (ice-water interface) heat capacity (J/kg/K)
+		self.mixed_layer_capacity=3974.
+
+		#thermal exchange velocity (ice-water interface) (m/s)
+		self.thermal_exchange_velocity=1.00*10**-4
+
+		#Rheology law: what is the temperature dependence of B with T
+		#available: None, Cuffey, Paterson, Arrhenius or LliboutryDuval
+		self.rheology_law='Paterson'
+
+		# GIA:
+		self.lithosphere_shear_modulus = 6.7*10**10  # (Pa)
+		self.lithosphere_density       = 3.32        # (g/cm^3)
+		self.mantle_shear_modulus      = 1.45*10**11 # (Pa)
+		self.mantle_density            = 3.34        # (g/cm^3)
+
+		return self
+		#}}}
+	def checkconsistency(self,md,solution,analyses):    # {{{
+		md = checkfield(md,'fieldname','materials.rho_ice','>',0)
+		md = checkfield(md,'fieldname','materials.rho_water','>',0)
+		md = checkfield(md,'fieldname','materials.rho_freshwater','>',0)
+		md = checkfield(md,'fieldname','materials.mu_water','>',0)
+		md = checkfield(md,'fieldname','materials.rheology_B','>',0,'timeseries',1,'NaN',1)
+		md = checkfield(md,'fieldname','materials.rheology_n','>',0,'size',[md.mesh.numberofelements])
+		md = checkfield(md,'fieldname','materials.rheology_law','values',['None','Cuffey','Paterson','Arrhenius','LliboutryDuval'])
+		md = checkfield(md,'fieldname','materials.lithosphere_shear_modulus','>',0,'numel',[1]);
+		md = checkfield(md,'fieldname','materials.lithosphere_density','>',0,'numel',[1]);
+		md = checkfield(md,'fieldname','materials.mantle_shear_modulus','>',0,'numel',[1]);
+		md = checkfield(md,'fieldname','materials.mantle_density','>',0,'numel',[1]);
+		return md
+	# }}}
+	def marshall(self,md,fid):    # {{{
+		WriteData(fid,'enum',MaterialsEnum(),'data',MaticeEnum(),'format','Integer');
+		WriteData(fid,'object',self,'class','materials','fieldname','rho_ice','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','rho_water','enum',MaterialsRhoSeawaterEnum(),'format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','rho_freshwater','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','mu_water','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','heatcapacity','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','latentheat','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','thermalconductivity','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','temperateiceconductivity','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','meltingpoint','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','beta','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','mixed_layer_capacity','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','thermal_exchange_velocity','format','Double')
+		WriteData(fid,'object',self,'class','materials','fieldname','rheology_B','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1)
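+		#Note: lithosphere and mantle densities are entered in g/cm^3; the
+		#'scale',10.**3. options below convert them to kg/m^3.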
WriteData(fid,'object',self,'class','materials','fieldname','rheology_n','format','DoubleMat','mattype',2) + WriteData(fid,'data',StringToEnum(self.rheology_law)[0],'enum',MaterialsRheologyLawEnum(),'format','Integer') + + WriteData(fid,'object',self,'class','materials','fieldname','lithosphere_shear_modulus','format','Double'); + WriteData(fid,'object',self,'class','materials','fieldname','lithosphere_density','format','Double','scale',10.**3.); + WriteData(fid,'object',self,'class','materials','fieldname','mantle_shear_modulus','format','Double'); + WriteData(fid,'object',self,'class','materials','fieldname','mantle_density','format','Double','scale',10.**3.); + # }}} Index: ../trunk-jpl/src/py3/classes/qmu.py =================================================================== --- ../trunk-jpl/src/py3/classes/qmu.py (revision 0) +++ ../trunk-jpl/src/py3/classes/qmu.py (revision 19895) @@ -0,0 +1,157 @@ +import numpy +from project3d import project3d +from collections import OrderedDict +from fielddisplay import fielddisplay +from EnumDefinitions import * +from checkfield import checkfield +from WriteData import WriteData +import MatlabFuncs as m + +class qmu(object): + """ + QMU class definition + + Usage: + qmu=qmu(); + """ + + def __init__(self): # {{{ + self.isdakota = 0 + self.variables = OrderedDict() + self.responses = OrderedDict() + self.method = OrderedDict() + self.params = OrderedDict() + self.results = OrderedDict() + self.partition = float('NaN') + self.numberofpartitions = 0 + self.numberofresponses = 0 + self.variabledescriptors = [] + self.responsedescriptors = [] + self.mass_flux_profile_directory = float('NaN') + self.mass_flux_profiles = float('NaN') + self.mass_flux_segments = [] + self.adjacency = float('NaN') + self.vertex_weight = float('NaN') + + #set defaults + self.setdefaultparameters() + + #}}} + def __repr__(self): # {{{ + s =' qmu parameters:\n' + + s+="%s\n" % fielddisplay(self,'isdakota','is qmu analysis activated?') + for i,variable in enumerate(self.variables.items()): + s+=" variables%s: (arrays of each variable class)\n" % \ + string_dim(self.variables,i) + fnames=vars(variable) + maxlen=0 + for fname in fnames: + maxlen=max(maxlen,len(fname)) + + for fname in fnames: + s+="' %-*s: [%ix%i] '%s'\n" % \ + (maxlen+1,fname,size(getattr(variable,fname)),type(getattr(variable,fname))) + + for i,response in enumerate(self.responses.items()): + s+=" responses%s: (arrays of each response class)\n" % \ + string_dim(self.responses,i) + fnames=vars(response) + maxlen=0 + for fname in fnames: + maxlen=max(maxlen,len(fname)) + + for fname in fnames: + s+=" %-*s: [%ix%i] '%s'\n" % \ + (maxlen+1,fname,size(getattr(response,fname)),type(getattr(response,fname))) + + s+="%s\n" % fielddisplay(self,'numberofresponses','number of responses') + + for i,method in enumerate(self.method.items()): + if isinstance(method,'dakota_method'): + s+=" method%s : '%s'\n" % \ + (string_dim(method,i),method.method) + + for i,param in enumerate(self.params.items()): + s+=" params%s: (array of method-independent parameters)\n" % \ + string_dim(self.params,i) + fnames=vars(param) + maxlen=0 + for fname in fnames: + maxlen=max(maxlen,len(fname)) + + for fname in fnames: + s+=" %-*s: %s\n" % \ + (maxlen+1,fname,any2str(getattr(param,fname))) + + for i,result in enumerate(self.results.items()): + s+=" results%s: (information from dakota files)\n" % \ + string_dim(self.results,i) + fnames=vars(result) + maxlen=0 + for fname in fnames: + maxlen=max(maxlen,len(fname)) + + for fname in fnames: + 
Index: ../trunk-jpl/src/py3/classes/miscellaneous.py
===================================================================
--- ../trunk-jpl/src/py3/classes/miscellaneous.py (revision 0)
+++ ../trunk-jpl/src/py3/classes/miscellaneous.py (revision 19895)
@@ -0,0 +1,41 @@
+from collections import OrderedDict
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from checkfield import checkfield
+from WriteData import WriteData
+
+class miscellaneous(object):
+    """
+    MISCELLANEOUS class definition
+
+    Usage:
+        miscellaneous=miscellaneous();
+    """
+
+    def __init__(self):    # {{{
+        self.notes = ''
+        self.name  = ''
+        self.dummy = OrderedDict()
+
+        #set defaults
+        self.setdefaultparameters()
+
+    #}}}
+    def __repr__(self):    # {{{
+        string='   miscellaneous parameters:'
+
+        string="%s\n%s"%(string,fielddisplay(self,'notes','notes in a list of strings'))
+        string="%s\n%s"%(string,fielddisplay(self,'name','model name'))
+        string="%s\n%s"%(string,fielddisplay(self,'dummy','empty field to store some data'))
+        return string
+    #}}}
+    def setdefaultparameters(self):    # {{{
+        return self
+    #}}}
+    def checkconsistency(self,md,solution,analyses):    # {{{
+        md = checkfield(md,'fieldname','miscellaneous.name','empty',1)
+        return md
+    # }}}
+    def marshall(self,md,fid):    # {{{
+        WriteData(fid,'object',self,'fieldname','name','format','String')
+    # }}}
Index: ../trunk-jpl/src/py3/classes/dependent.py
===================================================================
--- ../trunk-jpl/src/py3/classes/dependent.py (revision 0)
+++ ../trunk-jpl/src/py3/classes/dependent.py (revision 19895)
@@ -0,0 +1,88 @@
+import os.path
+import numpy
+from pairoptions import pairoptions
+from fielddisplay import fielddisplay
+import MatlabFuncs as m
+from EnumDefinitions import *
+from MeshProfileIntersection import MeshProfileIntersection
+
+class dependent(object):
+    """
+    DEPENDENT class definition
+
+    Usage:
+        dependent=dependent();
+    """
+
+    def __init__(self,**kwargs):    # {{{
+        self.name              = ''
+        self.type              = ''
+        self.fos_reverse_index = float('NaN')
+        self.exp               = ''
+        self.segments          = []
+        self.index             = -1
+        self.nods              = 0
+
+        #set defaults
+        self.setdefaultparameters()
+
+        #use provided options to change fields
+        options=pairoptions(**kwargs)
+
+        self.name=options.getfieldvalue('name','')
+        self.type=options.getfieldvalue('type','')
+        self.exp=options.getfieldvalue('exp','')
+        self.segments=options.getfieldvalue('segments',[])
+        self.index=options.getfieldvalue('index',-1)
+        self.nods=options.getfieldvalue('nods',0)
+
+        #if name is mass flux:
+        if m.strcmpi(self.name,'MassFlux'):
+            #make sure that we supplied a file and that it exists!
+            if not os.path.exists(self.exp):
+                raise IOError("dependent error message: specified 'exp' file does not exist!")
+            #process the file and retrieve segments
+            mesh=options.getfieldvalue('mesh')
+            self.segments=MeshProfileIntersection(mesh.elements,mesh.x,mesh.y,self.exp)
+    # }}}
+    def __repr__(self):    # {{{
+        s ="   dependent variable:\n"
+
+        s+="%s\n" % fielddisplay(self,'name',"variable name (must match corresponding Enum)")
+        s+="%s\n" % fielddisplay(self,'type',"type of variable ('vertex' or 'scalar')")
+
+        if not numpy.isnan(self.fos_reverse_index):
+            s+="%s\n" % fielddisplay(self,'fos_reverse_index',"index for fos_reverse driver of ADOLC")
+        if self.exp:
+            s+="%s\n" % fielddisplay(self,'exp',"file needed to compute dependent variable")
+            s+="%s\n" % fielddisplay(self,'segments',"mass flux segments")
+
+        return s
+    # }}}
+    def setdefaultparameters(self):    # {{{
+        #do nothing
+        return self
+    # }}}
+    def checkconsistency(self,md,solution,analyses):    # {{{
+        if m.strcmpi(self.name,'MassFlux'):
+            if not self.segments:
+                raise RuntimeError("dependent checkconsistency error: need segments to compute this dependent response")
+            if self.index<0:
+                raise RuntimeError("dependent checkconsistency error: index for segments should be >=0")
+
+        if not numpy.isnan(self.fos_reverse_index):
+            if not m.strcmpi(md.autodiff.driver,'fos_reverse'):
+                raise TypeError("cannot declare a dependent with a fos_reverse_index when the driver is not fos_reverse!")
+            if self.nods==0:
+                raise TypeError("dependent checkconsistency error: nods should be set to the size of the independent variable")
+
+        return md
+    # }}}
+    def typetoscalar(self):    # {{{
+        if m.strcmpi(self.type,'scalar'):
+            scalar=0
+        elif m.strcmpi(self.type,'vertex'):
+            scalar=1
+        else:
+            raise TypeError("dependent type '%s' not supported yet!" % self.type)
+
+        return scalar
+    # }}}
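
Since __init__ above routes keyword arguments through pairoptions, a dependent is declared with keywords; a minimal sketch (hypothetical response name, no 'exp' file needed because the name is not 'MassFlux'):

    from dependent import dependent
    dep=dependent(name='IceVolume',type='scalar')
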
Index: ../trunk-jpl/src/py3/classes/timestepping.py
===================================================================
--- ../trunk-jpl/src/py3/classes/timestepping.py (revision 0)
+++ ../trunk-jpl/src/py3/classes/timestepping.py (revision 19895)
@@ -0,0 +1,76 @@
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from checkfield import checkfield
+from WriteData import WriteData
+
+class timestepping(object):
+    """
+    TIMESTEPPING class definition
+
+    Usage:
+        timestepping=timestepping();
+    """
+
+    def __init__(self):    # {{{
+        self.start_time      = 0.
+        self.final_time      = 0.
+        self.time_step       = 0.
+        self.time_adapt      = 0
+        self.cfl_coefficient = 0.
+        self.interp_forcings = 1
+
+        #set defaults
+        self.setdefaultparameters()
+
+    #}}}
+    def __repr__(self):    # {{{
+        string="   timestepping parameters:"
+        string="%s\n%s"%(string,fielddisplay(self,"start_time","simulation starting time [yr]"))
+        string="%s\n%s"%(string,fielddisplay(self,"final_time","final time to stop the simulation [yr]"))
+        string="%s\n%s"%(string,fielddisplay(self,"time_step","length of time steps [yr]"))
+        string="%s\n%s"%(string,fielddisplay(self,"time_adapt","use CFL condition to define time step? (0 or 1)"))
+        string="%s\n%s"%(string,fielddisplay(self,"cfl_coefficient","coefficient applied to CFL condition"))
+        string="%s\n%s"%(string,fielddisplay(self,"interp_forcings","interpolate in time between requested forcing values? (0 or 1)"))
+        return string
+    #}}}
+    def setdefaultparameters(self):    # {{{
+
+        #time between 2 time steps
+        self.time_step=1./2.
+
+        #final time
+        self.final_time=10.*self.time_step
+
+        #time adaptation?
+        self.time_adapt=0
+        self.cfl_coefficient=0.5
+
+        #should we interpolate forcings between timesteps?
+        self.interp_forcings=1
+
+        return self
+    #}}}
+    def checkconsistency(self,md,solution,analyses):    # {{{
+
+        md = checkfield(md,'fieldname','timestepping.start_time','numel',[1],'NaN',1)
+        md = checkfield(md,'fieldname','timestepping.final_time','numel',[1],'NaN',1)
+        md = checkfield(md,'fieldname','timestepping.time_step','numel',[1],'>=',0,'NaN',1)
+        md = checkfield(md,'fieldname','timestepping.time_adapt','numel',[1],'values',[0,1])
+        md = checkfield(md,'fieldname','timestepping.cfl_coefficient','numel',[1],'>',0,'<=',1)
+        if self.final_time-self.start_time<0:
+            md.checkmessage("timestepping.final_time should be larger than timestepping.start_time")
+        md = checkfield(md,'fieldname','timestepping.interp_forcings','numel',[1],'values',[0,1])
+
+        return md
+    # }}}
+    def marshall(self,md,fid):    # {{{
+
+        yts=md.constants.yts
+
+        WriteData(fid,'object',self,'fieldname','start_time','format','Double','scale',yts)
+        WriteData(fid,'object',self,'fieldname','final_time','format','Double','scale',yts)
+        WriteData(fid,'object',self,'fieldname','time_step','format','Double','scale',yts)
+        WriteData(fid,'object',self,'fieldname','time_adapt','format','Boolean')
+        WriteData(fid,'object',self,'fieldname','cfl_coefficient','format','Double')
+        WriteData(fid,'object',self,'fieldname','interp_forcings','format','Boolean')
+    # }}}
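
For a fixed-step run only start_time, final_time and time_step matter; setting time_adapt=1 makes the core derive the step from the CFL condition scaled by cfl_coefficient instead. A short sketch with hypothetical values:

    from timestepping import timestepping
    ts=timestepping()
    ts.start_time=0.      # [yr]
    ts.final_time=100.    # [yr]
    ts.time_step=0.5      # [yr], ignored by the core when time_adapt==1
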
Index: ../trunk-jpl/src/py3/classes/toolkits.py
===================================================================
--- ../trunk-jpl/src/py3/classes/toolkits.py (revision 0)
+++ ../trunk-jpl/src/py3/classes/toolkits.py (revision 19895)
@@ -0,0 +1,113 @@
+from IssmConfig import IssmConfig
+from mumpsoptions import mumpsoptions
+from iluasmoptions import iluasmoptions
+from issmgslsolver import issmgslsolver
+from issmmumpssolver import issmmumpssolver
+from EnumToString import EnumToString
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from checkfield import checkfield
+
+class toolkits(object):
+    """
+    TOOLKITS class definition
+
+    Usage:
+        toolkits=toolkits();
+    """
+
+    def __init__(self):    # {{{
+        #default toolkits
+        if IssmConfig('_HAVE_PETSC_')[0]:
+            #MUMPS is the default toolkit
+            if IssmConfig('_HAVE_MUMPS_')[0]:
+                self.DefaultAnalysis = mumpsoptions()
+            else:
+                self.DefaultAnalysis = iluasmoptions()
+        else:
+            if IssmConfig('_HAVE_MUMPS_')[0]:
+                self.DefaultAnalysis = issmmumpssolver()
+            elif IssmConfig('_HAVE_GSL_')[0]:
+                self.DefaultAnalysis = issmgslsolver()
+            else:
+                raise IOError("ToolkitsFile error: need at least Mumps or Gsl to define issm solver type")
+        #The other properties are dynamic
+    # }}}
+    def __repr__(self):    # {{{
+        s ="List of toolkits options per analysis:\n\n"
+        for analysis in list(vars(self).keys()):
+            s+="%s\n" % fielddisplay(self,analysis,'')
+
+        return s
+    # }}}
+    def addoptions(self,analysis,*args):    # {{{
+        # Usage example:
+        #    md.toolkits=addoptions(md.toolkits,StressbalanceAnalysisEnum(),FSoptions());
+        #    md.toolkits=addoptions(md.toolkits,StressbalanceAnalysisEnum());
+
+        #Convert analysis from enum to string
+        [analysis]=EnumToString(analysis)
+
+        #Create dynamic property if property does not exist yet
+        if not hasattr(self,analysis):
+            setattr(self,analysis,None)
+
+        #Add toolkits options to analysis
+        if len(args)==1:
+            setattr(self,analysis,args[0])
+
+        return self
+    # }}}
+    def checkconsistency(self,md,solution,analyses):    # {{{
+        for analysis in list(vars(self).keys()):
+            if not getattr(self,analysis):
+                md.checkmessage("md.toolkits.%s is empty" % analysis)
+
+        return md
+    # }}}
+    def ToolkitsFile(self,filename):    # {{{
+        """
+        TOOLKITSFILE - build toolkits file
+
+        Build a PETSc-compatible options file from the toolkits model field.
+        This file is also used when the toolkit is 'issm' instead of 'petsc'.
+
+        Usage:
+            ToolkitsFile(toolkits,filename);
+        """
+
+        #open file for writing
+        try:
+            fid=open(filename,'w')
+        except IOError as e:
+            raise IOError("ToolkitsFile error: could not open '%s' for writing." % filename)
+
+        #write header
+        fid.write("%s%s%s\n" % ('%Petsc options file: ',filename,' written from Python toolkits array'))
+
+        #start writing options
+        for analysis in list(vars(self).keys()):
+            options=getattr(self,analysis)
+
+            #first write analysis:
+            fid.write("\n+%s\n" % analysis)    #append a + to recognize it's an analysis enum
+
+            #now, write options
+            for optionname,optionvalue in list(options.items()):
+
+                if not optionvalue:
+                    #this option has only one argument
+                    fid.write("-%s\n" % optionname)
+                else:
+                    #option with value. value can be string or scalar
+                    if isinstance(optionvalue,(bool,int,float)):
+                        fid.write("-%s %g\n" % (optionname,optionvalue))
+                    elif isinstance(optionvalue,str):
+                        fid.write("-%s %s\n" % (optionname,optionvalue))
+                    else:
+                        raise TypeError("ToolkitsFile error: option '%s' is not well formatted." % optionname)
+
+        fid.close()
+    # }}}
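
Given the write loop in ToolkitsFile() above, the generated options file has one '+' line per analysis followed by '-name value' pairs; illustrative content only (the actual option names come from mumpsoptions/iluasmoptions, which are not shown here):

    %Petsc options file: toolkits.petsc written from Python toolkits array

    +DefaultAnalysis
    -mat_type mpiaij
    -ksp_type preonly
    -pc_type lu
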
Index: ../trunk-jpl/src/py3/classes/SMBmeltcomponents.py
===================================================================
--- ../trunk-jpl/src/py3/classes/SMBmeltcomponents.py (revision 0)
+++ ../trunk-jpl/src/py3/classes/SMBmeltcomponents.py (revision 19895)
@@ -0,0 +1,108 @@
+import numpy
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from checkfield import *
+from project3d import *
+from WriteData import *
+
+class SMBmeltcomponents(object):
+    """
+    SMBmeltcomponents Class definition
+
+    Usage:
+        SMBmeltcomponents=SMBmeltcomponents();
+    """
+
+    def __init__(self):    # {{{
+        self.accumulation      = float('NaN')
+        self.runoff            = float('NaN')
+        self.evaporation       = float('NaN')
+        self.melt              = float('NaN')
+        self.refreeze          = float('NaN')
+        self.requested_outputs = []
+    #}}}
+    def __repr__(self):    # {{{
+        string="   surface forcings parameters with melt (SMB=accumulation-evaporation-melt+refreeze):"
+        string="%s\n%s"%(string,fielddisplay(self,'accumulation','accumulated snow [m/yr ice eq]'))
+        string="%s\n%s"%(string,fielddisplay(self,'evaporation','amount of ice lost to evaporative processes [m/yr ice eq]'))
+        string="%s\n%s"%(string,fielddisplay(self,'melt','amount of ice melt in the ice column [m/yr ice eq]'))
+        string="%s\n%s"%(string,fielddisplay(self,'refreeze','amount of ice melt refrozen in the ice column [m/yr ice eq]'))
+        string="%s\n%s"%(string,fielddisplay(self,'requested_outputs','additional outputs requested'))
+        return string
+    #}}}
+    def extrude(self,md):    # {{{
+
+        self.accumulation=project3d(md,'vector',self.accumulation,'type','node')
+        self.evaporation=project3d(md,'vector',self.evaporation,'type','node')
+        self.melt=project3d(md,'vector',self.melt,'type','node')
+        self.refreeze=project3d(md,'vector',self.refreeze,'type','node')
+        return self
+    #}}}
+    def defaultoutputs(self,md):    # {{{
+        return []
+    #}}}
+    def initialize(self,md):    # {{{
+
+        if numpy.all(numpy.isnan(self.accumulation)):
+            self.accumulation=numpy.zeros((md.mesh.numberofvertices,1))
+            print("      no SMB.accumulation specified: values set as zero")
+
+        if numpy.all(numpy.isnan(self.evaporation)):
+            self.evaporation=numpy.zeros((md.mesh.numberofvertices,1))
+            print("      no SMB.evaporation specified: values set as zero")
+
+        if numpy.all(numpy.isnan(self.melt)):
+            self.melt=numpy.zeros((md.mesh.numberofvertices,1))
+            print("      no SMB.melt specified: values set as zero")
+
+        if numpy.all(numpy.isnan(self.refreeze)):
+            self.refreeze=numpy.zeros((md.mesh.numberofvertices,1))
+            print("      no SMB.refreeze specified: values set as zero")
+
+        return self
+    #}}}
+    def checkconsistency(self,md,solution,analyses):    # {{{
+
+        if MasstransportAnalysisEnum() in analyses:
+            md = checkfield(md,'fieldname','smb.accumulation','timeseries',1,'NaN',1)
+            md = checkfield(md,'fieldname','smb.evaporation','timeseries',1,'NaN',1)
+            md = checkfield(md,'fieldname','smb.melt','timeseries',1,'NaN',1)
+            md = checkfield(md,'fieldname','smb.refreeze','timeseries',1,'NaN',1)
+
+        if BalancethicknessAnalysisEnum() in analyses:
+            md = checkfield(md,'fieldname','smb.accumulation','size',[md.mesh.numberofvertices],'NaN',1)
+            md = checkfield(md,'fieldname','smb.evaporation','size',[md.mesh.numberofvertices],'NaN',1)
+            md = checkfield(md,'fieldname','smb.melt','size',[md.mesh.numberofvertices],'NaN',1)
+            md = checkfield(md,'fieldname','smb.refreeze','size',[md.mesh.numberofvertices],'NaN',1)
+
+        md = checkfield(md,'fieldname','masstransport.requested_outputs','stringrow',1)
+        return md
+    # }}}
+    def marshall(self,md,fid):    # {{{
+
+        yts=365.0*24.0*3600.0
+
+        WriteData(fid,'enum',SmbEnum(),'data',SMBmeltcomponentsEnum(),'format','Integer')
+        WriteData(fid,'object',self,'class','smb','fieldname','accumulation','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1)
+        WriteData(fid,'object',self,'class','smb','fieldname','evaporation','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1)
+        WriteData(fid,'object',self,'class','smb','fieldname','melt','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1)
+        WriteData(fid,'object',self,'class','smb','fieldname','refreeze','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1)
+
+        #process requested outputs
+        outputs = self.requested_outputs
+        indices = [i for i, x in enumerate(outputs) if x == 'default']
+        if len(indices) > 0:
+            outputscopy=outputs[0:indices[0]]+self.defaultoutputs(md)+outputs[indices[0]+1:]
+            outputs    =outputscopy
+        WriteData(fid,'data',outputs,'enum',SmbRequestedOutputsEnum(),'format','StringArray')
+
+    # }}}
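
The __repr__ above documents the decomposition SMB = accumulation - evaporation - melt + refreeze [m/yr ice eq]; a quick numerical check with hypothetical per-vertex values:

    import numpy
    nv=4    # hypothetical vertex count
    acc     =numpy.full((nv,),0.30)
    evap    =numpy.full((nv,),0.05)
    melt    =numpy.full((nv,),0.20)
    refreeze=numpy.full((nv,),0.10)
    smb=acc-evap-melt+refreeze    # 0.15 m/yr ice eq at every vertex
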
Index: ../trunk-jpl/src/py3/classes/private.py
===================================================================
--- ../trunk-jpl/src/py3/classes/private.py (revision 0)
+++ ../trunk-jpl/src/py3/classes/private.py (revision 19895)
@@ -0,0 +1,37 @@
+from collections import OrderedDict
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from checkfield import checkfield
+
+class private(object):
+    """
+    PRIVATE class definition
+
+    Usage:
+        private=private();
+    """
+
+    def __init__(self):    # {{{
+        self.isconsistent = True
+        self.runtimename  = ''
+        self.bamg         = OrderedDict()
+        self.solution     = ''
+
+        #set defaults
+        self.setdefaultparameters()
+    #}}}
+    def __repr__(self):    # {{{
+        string='   private parameters: do not change'
+
+        string="%s\n%s"%(string,fielddisplay(self,'isconsistent','is model self consistent'))
+        string="%s\n%s"%(string,fielddisplay(self,'runtimename','name of the run launched'))
+        string="%s\n%s"%(string,fielddisplay(self,'bamg','structure with mesh properties constructed if bamg is used to mesh the domain'))
+        string="%s\n%s"%(string,fielddisplay(self,'solution','type of solution launched'))
+        return string
+    #}}}
+    def setdefaultparameters(self):    # {{{
+        return self
+    #}}}
+    def checkconsistency(self,md,solution,analyses):    # {{{
+        return md
+    # }}}
Index: ../trunk-jpl/src/py3/classes/inversion.py
===================================================================
--- ../trunk-jpl/src/py3/classes/inversion.py (revision 0)
+++ ../trunk-jpl/src/py3/classes/inversion.py (revision 19895)
@@ -0,0 +1,194 @@
+import numpy
+from project3d import project3d
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from StringToEnum import StringToEnum
+from checkfield import checkfield
+from WriteData import WriteData
+from supportedcontrols import supportedcontrols
+from supportedcostfunctions import supportedcostfunctions
+from marshallcostfunctions import marshallcostfunctions
+
+class inversion(object):
+    """
+    INVERSION class definition
+
+    Usage:
+        inversion=inversion()
+    """
+
+    def __init__(self):    # {{{
+        self.iscontrol                   = 0
+        self.incomplete_adjoint          = 0
+        self.control_parameters          = float('NaN')
+        self.nsteps                      = 0
+        self.maxiter_per_step            = float('NaN')
+        self.cost_functions              = float('NaN')
+        self.cost_functions_coefficients = float('NaN')
+        self.gradient_scaling            = float('NaN')
+        self.cost_function_threshold     = 0
+        self.min_parameters              = float('NaN')
+        self.max_parameters              = float('NaN')
+        self.step_threshold              = float('NaN')
+        self.vx_obs                      = float('NaN')
+        self.vy_obs                      = float('NaN')
+        self.vz_obs                      = float('NaN')
+        self.vel_obs                     = float('NaN')
+        self.thickness_obs               = float('NaN')
+        self.surface_obs                 = float('NaN')
+
+        #set defaults
+        self.setdefaultparameters()
+
+    #}}}
+    def __repr__(self):    # {{{
+        string='   inversion parameters:'
+        string="%s\n%s"%(string,fielddisplay(self,'iscontrol','is inversion activated?'))
+        string="%s\n%s"%(string,fielddisplay(self,'incomplete_adjoint','1: linear viscosity, 0: non-linear viscosity'))
+        string="%s\n%s"%(string,fielddisplay(self,'control_parameters',"ex: ['FrictionCoefficient'], or ['MaterialsRheologyBbar']"))
+        string="%s\n%s"%(string,fielddisplay(self,'nsteps','number of optimization searches'))
+        string="%s\n%s"%(string,fielddisplay(self,'cost_functions','indicate the type of response for each optimization step'))
+        string="%s\n%s"%(string,fielddisplay(self,'cost_functions_coefficients','cost_functions_coefficients applied to the misfit of each vertex and for each control_parameter'))
+        string="%s\n%s"%(string,fielddisplay(self,'cost_function_threshold','misfit convergence criterion. Default is 1%, NaN if not applied'))
+        string="%s\n%s"%(string,fielddisplay(self,'maxiter_per_step','maximum iterations during each optimization step'))
+        string="%s\n%s"%(string,fielddisplay(self,'gradient_scaling','scaling factor on gradient direction during optimization, for each optimization step'))
+        string="%s\n%s"%(string,fielddisplay(self,'step_threshold','decrease threshold for misfit, default is 30%'))
+        string="%s\n%s"%(string,fielddisplay(self,'min_parameters','absolute minimum acceptable value of the inversed parameter on each vertex'))
+        string="%s\n%s"%(string,fielddisplay(self,'max_parameters','absolute maximum acceptable value of the inversed parameter on each vertex'))
+        string="%s\n%s"%(string,fielddisplay(self,'vx_obs','observed velocity x component [m/yr]'))
+        string="%s\n%s"%(string,fielddisplay(self,'vy_obs','observed velocity y component [m/yr]'))
+        string="%s\n%s"%(string,fielddisplay(self,'vel_obs','observed velocity magnitude [m/yr]'))
+        string="%s\n%s"%(string,fielddisplay(self,'thickness_obs','observed thickness [m]'))
+        string="%s\n%s"%(string,fielddisplay(self,'surface_obs','observed surface elevation [m]'))
+        string="%s\n%s"%(string,'Available cost functions:')
+        string="%s\n%s"%(string,'   101: SurfaceAbsVelMisfit')
+        string="%s\n%s"%(string,'   102: SurfaceRelVelMisfit')
+        string="%s\n%s"%(string,'   103: SurfaceLogVelMisfit')
+        string="%s\n%s"%(string,'   104: SurfaceLogVxVyMisfit')
+        string="%s\n%s"%(string,'   105: SurfaceAverageVelMisfit')
+        string="%s\n%s"%(string,'   201: ThicknessAbsMisfit')
+        string="%s\n%s"%(string,'   501: DragCoefficientAbsGradient')
+        string="%s\n%s"%(string,'   502: RheologyBbarAbsGradient')
+        string="%s\n%s"%(string,'   503: ThicknessAbsGradient')
+        return string
+    #}}}
+    def extrude(self,md):    # {{{
+        self.vx_obs=project3d(md,'vector',self.vx_obs,'type','node')
+        self.vy_obs=project3d(md,'vector',self.vy_obs,'type','node')
+        self.vel_obs=project3d(md,'vector',self.vel_obs,'type','node')
+        self.thickness_obs=project3d(md,'vector',self.thickness_obs,'type','node')
+        if not numpy.any(numpy.isnan(self.cost_functions_coefficients)):
+            self.cost_functions_coefficients=project3d(md,'vector',self.cost_functions_coefficients,'type','node')
+        if not numpy.any(numpy.isnan(self.min_parameters)):
+            self.min_parameters=project3d(md,'vector',self.min_parameters,'type','node')
+        if not numpy.any(numpy.isnan(self.max_parameters)):
+            self.max_parameters=project3d(md,'vector',self.max_parameters,'type','node')
+        return self
+    #}}}
+    def setdefaultparameters(self):    # {{{
+
+        #default is incomplete adjoint for now
+        self.incomplete_adjoint=1
+
+        #parameter to be inferred by control methods (only
+        #drag and B are supported for now)
+        self.control_parameters='FrictionCoefficient'
+
+        #number of steps in the control methods
+        self.nsteps=20
+
+        #maximum number of iterations in the optimization algorithm for
+        #each step
+        self.maxiter_per_step=20*numpy.ones(self.nsteps)
+
+        #the inversed parameter is updated as follows:
+        #new_par=old_par + gradient_scaling(n)*C*gradient with C in [0 1];
+        #usually the gradient_scaling must be of the order of magnitude of the
+        #inversed parameter (10^8 for B, 50 for drag) and can be decreased
+        #after the first iterations
+        self.gradient_scaling=50*numpy.ones((self.nsteps,1))
+
+        #several responses can be used:
+        self.cost_functions=101
+
+        #step_threshold is used to speed up the control method. When
+        #misfit(1)/misfit(0) < self.step_threshold, we go directly to
+        #the next step
+        self.step_threshold=.7*numpy.ones(self.nsteps)    #30 per cent decrement
+
+        #cost_function_threshold is a criterion to stop the control methods.
+        #if J[n]-J[n-1]/J[n] < criterion, the control run stops
+        #NaN if not applied
+        self.cost_function_threshold=float('NaN')    #not activated
+
+        return self
+    #}}}
+    def checkconsistency(self,md,solution,analyses):    # {{{
+
+        #Early return
+        if not self.iscontrol:
+            return md
+
+        num_controls=numpy.size(md.inversion.control_parameters)
+        num_costfunc=numpy.size(md.inversion.cost_functions)
+
+        md = checkfield(md,'fieldname','inversion.iscontrol','values',[0,1])
+        md = checkfield(md,'fieldname','inversion.incomplete_adjoint','values',[0,1])
+        md = checkfield(md,'fieldname','inversion.control_parameters','cell',1,'values',supportedcontrols())
+        md = checkfield(md,'fieldname','inversion.nsteps','numel',[1],'>=',0)
+        md = checkfield(md,'fieldname','inversion.maxiter_per_step','size',[md.inversion.nsteps],'>=',0)
+        md = checkfield(md,'fieldname','inversion.step_threshold','size',[md.inversion.nsteps])
+        md = checkfield(md,'fieldname','inversion.cost_functions','size',[num_costfunc],'values',supportedcostfunctions())
+        md = checkfield(md,'fieldname','inversion.cost_functions_coefficients','size',[md.mesh.numberofvertices,num_costfunc],'>=',0)
+        md = checkfield(md,'fieldname','inversion.gradient_scaling','size',[md.inversion.nsteps,num_controls])
+        md = checkfield(md,'fieldname','inversion.min_parameters','size',[md.mesh.numberofvertices,num_controls])
+        md = checkfield(md,'fieldname','inversion.max_parameters','size',[md.mesh.numberofvertices,num_controls])
+
+        #Only SSA, HO and FS are supported right now
+        if solution==StressbalanceSolutionEnum():
+            if not (md.flowequation.isSSA or md.flowequation.isHO or md.flowequation.isFS or md.flowequation.isL1L2):
+                md.checkmessage("inversion can only be performed for SSA, L1L2, HO or FS ice flow models")
+
+        if solution==BalancethicknessSolutionEnum():
+            md = checkfield(md,'fieldname','inversion.thickness_obs','size',[md.mesh.numberofvertices],'NaN',1)
+        else:
+            md = checkfield(md,'fieldname','inversion.vx_obs','size',[md.mesh.numberofvertices],'NaN',1)
+            md = checkfield(md,'fieldname','inversion.vy_obs','size',[md.mesh.numberofvertices],'NaN',1)
+
+        return md
+    # }}}
+    def marshall(self,md,fid):    # {{{
+
+        yts=365.0*24.0*3600.0
+
+        WriteData(fid,'enum',InversionTypeEnum(),'data',0,'format','Integer')
+        WriteData(fid,'object',self,'fieldname','iscontrol','format','Boolean')
+        WriteData(fid,'object',self,'fieldname','incomplete_adjoint','format','Boolean')
+        if not self.iscontrol:
+            return
+        WriteData(fid,'object',self,'fieldname','nsteps','format','Integer')
+        WriteData(fid,'object',self,'fieldname','maxiter_per_step','format','DoubleMat','mattype',3)
+        WriteData(fid,'object',self,'fieldname','cost_functions_coefficients','format','DoubleMat','mattype',1)
+        WriteData(fid,'object',self,'fieldname','gradient_scaling','format','DoubleMat','mattype',3)
+        WriteData(fid,'object',self,'fieldname','cost_function_threshold','format','Double')
+        WriteData(fid,'object',self,'fieldname','min_parameters','format','DoubleMat','mattype',3)
+        WriteData(fid,'object',self,'fieldname','max_parameters','format','DoubleMat','mattype',3)
+        WriteData(fid,'object',self,'fieldname','step_threshold','format','DoubleMat','mattype',3)
+        WriteData(fid,'object',self,'fieldname','vx_obs','format','DoubleMat','mattype',1,'scale',1./yts)
+        WriteData(fid,'object',self,'fieldname','vy_obs','format','DoubleMat','mattype',1,'scale',1./yts)
+        WriteData(fid,'object',self,'fieldname','vz_obs','format','DoubleMat','mattype',1,'scale',1./yts)
+        WriteData(fid,'object',self,'fieldname','thickness_obs','format','DoubleMat','mattype',1)
+        WriteData(fid,'object',self,'fieldname','surface_obs','format','DoubleMat','mattype',1)
+
+        #process control parameters
+        num_control_parameters=len(self.control_parameters)
+        data=numpy.array([StringToEnum(control_parameter)[0] for control_parameter in self.control_parameters]).reshape(1,-1)
+        WriteData(fid,'data',data,'enum',InversionControlParametersEnum(),'format','DoubleMat','mattype',3)
+        WriteData(fid,'data',num_control_parameters,'enum',InversionNumControlParametersEnum(),'format','Integer')
+
+        #process cost functions
+        num_cost_functions=numpy.size(self.cost_functions)
+        data=marshallcostfunctions(self.cost_functions)
+        WriteData(fid,'data',numpy.array(data).reshape(1,-1),'enum',InversionCostFunctionsEnum(),'format','DoubleMat','mattype',3)
+        WriteData(fid,'data',num_cost_functions,'enum',InversionNumCostFunctionsEnum(),'format','Integer')
+    # }}}
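
A typical friction inversion pairs a velocity misfit (101) with a gradient penalty on the control (501), with per-vertex weights shaped [numberofvertices x num_costfunc] as checkconsistency() above requires; a sketch with hypothetical sizes and weights:

    import numpy
    nv=100                      # hypothetical md.mesh.numberofvertices
    cost_functions=[101,501]    # SurfaceAbsVelMisfit + DragCoefficientAbsGradient
    coefficients=numpy.ones((nv,len(cost_functions)))
    coefficients[:,1]=1.e-7     # hypothetical regularization weight
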
Index: ../trunk-jpl/src/py3/classes/constants.py
===================================================================
--- ../trunk-jpl/src/py3/classes/constants.py (revision 0)
+++ ../trunk-jpl/src/py3/classes/constants.py (revision 19895)
@@ -0,0 +1,57 @@
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from checkfield import checkfield
+from WriteData import WriteData
+
+class constants(object):
+    """
+    CONSTANTS class definition
+
+    Usage:
+        constants=constants();
+    """
+
+    def __init__(self):    # {{{
+        self.g                    = 0
+        self.yts                  = 0
+        self.referencetemperature = 0
+
+        #set defaults
+        self.setdefaultparameters()
+
+    #}}}
+    def __repr__(self):    # {{{
+        string="   constants parameters:"
+
+        string="%s\n%s"%(string,fielddisplay(self,"g","gravitational acceleration [m/s^2]"))
+        string="%s\n%s"%(string,fielddisplay(self,"yts","number of seconds in a year [s/yr]"))
+        string="%s\n%s"%(string,fielddisplay(self,"referencetemperature","reference temperature used in the enthalpy model [K]"))
+
+        return string
+    #}}}
+    def setdefaultparameters(self):    # {{{
+
+        #acceleration due to gravity (m/s^2)
+        self.g=9.81
+
+        #conversion from years to seconds
+        self.yts=365*24*3600
+
+        #the reference temperature for the enthalpy model (cf Aschwanden)
+        self.referencetemperature=223.15
+
+        return self
+    #}}}
+    def checkconsistency(self,md,solution,analyses):    # {{{
+
+        md = checkfield(md,'fieldname','constants.g','>',0,'size',[1])
+        md = checkfield(md,'fieldname','constants.yts','>',0,'size',[1])
+        md = checkfield(md,'fieldname','constants.referencetemperature','size',[1])
+
+        return md
+    # }}}
+    def marshall(self,md,fid):    # {{{
+        WriteData(fid,'object',self,'fieldname','g','format','Double')
+        WriteData(fid,'object',self,'fieldname','yts','format','Double')
+        WriteData(fid,'object',self,'fieldname','referencetemperature','format','Double')
+    # }}}
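
yts is the conversion factor used throughout the marshallers above (fields in m/yr are written with 'scale',1./yts so the core works in m/s):

    yts=365*24*3600    # 31536000 s/yr, as set in setdefaultparameters()
    v_myr=100.         # hypothetical velocity in m/yr
    v_ms=v_myr/yts     # the value actually written to the binary input file
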
Index: ../trunk-jpl/src/py3/classes/friction.py
===================================================================
--- ../trunk-jpl/src/py3/classes/friction.py (revision 0)
+++ ../trunk-jpl/src/py3/classes/friction.py (revision 19895)
@@ -0,0 +1,58 @@
+from fielddisplay import fielddisplay
+from project3d import project3d
+from EnumDefinitions import *
+from checkfield import checkfield
+from WriteData import WriteData
+
+class friction(object):
+    """
+    FRICTION class definition
+
+    Usage:
+        friction=friction()
+    """
+
+    def __init__(self):    # {{{
+        self.coefficient = float('NaN')
+        self.p           = float('NaN')
+        self.q           = float('NaN')
+
+        #set defaults
+        self.setdefaultparameters()
+
+    #}}}
+    def __repr__(self):    # {{{
+        string="Basal shear stress parameters: Sigma_b = coefficient^2 * Neff^r * |u_b|^(s-1) * u_b,\n(effective stress Neff=rho_ice*g*thickness+rho_water*g*bed, r=q/p and s=1/p)"
+
+        string="%s\n%s"%(string,fielddisplay(self,"coefficient","friction coefficient [SI]"))
+        string="%s\n%s"%(string,fielddisplay(self,"p","p exponent"))
+        string="%s\n%s"%(string,fielddisplay(self,"q","q exponent"))
+        return string
+    #}}}
+    def extrude(self,md):    # {{{
+        self.coefficient=project3d(md,'vector',self.coefficient,'type','node','layer',1)
+        self.p=project3d(md,'vector',self.p,'type','element')
+        self.q=project3d(md,'vector',self.q,'type','element')
+        return self
+    #}}}
+    def setdefaultparameters(self):    # {{{
+        return self
+    #}}}
+    def checkconsistency(self,md,solution,analyses):    # {{{
+
+        #Early return
+        if StressbalanceAnalysisEnum() not in analyses and ThermalAnalysisEnum() not in analyses:
+            return md
+
+        md = checkfield(md,'fieldname','friction.coefficient','timeseries',1,'NaN',1)
+        md = checkfield(md,'fieldname','friction.q','NaN',1,'size',[md.mesh.numberofelements])
+        md = checkfield(md,'fieldname','friction.p','NaN',1,'size',[md.mesh.numberofelements])
+
+        return md
+    # }}}
+    def marshall(self,md,fid):    # {{{
+        WriteData(fid,'enum',FrictionLawEnum(),'data',1,'format','Integer')
+        WriteData(fid,'object',self,'fieldname','coefficient','format','DoubleMat','mattype',1)
+        WriteData(fid,'object',self,'fieldname','p','format','DoubleMat','mattype',2)
+        WriteData(fid,'object',self,'fieldname','q','format','DoubleMat','mattype',2)
+    # }}}
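
The law displayed by __repr__ above, Sigma_b = coefficient^2 * Neff^r * |u_b|^(s-1) * u_b with r=q/p and s=1/p, reduces to a law linear in Neff and u_b for p=q=1; a scalar sketch with hypothetical values:

    import numpy
    p,q=1.,1.             # hypothetical exponents, giving r=1. and s=1.
    coefficient=100.      # hypothetical friction coefficient [SI]
    Neff,ub=1.e5,50.      # hypothetical effective pressure [Pa] and basal velocity [m/yr]
    r,s=q/p,1./p
    Sigma_b=coefficient**2*Neff**r*numpy.abs(ub)**(s-1.)*ub
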
Index: ../trunk-jpl/src/py3/classes/flowequation.py
===================================================================
--- ../trunk-jpl/src/py3/classes/flowequation.py (revision 0)
+++ ../trunk-jpl/src/py3/classes/flowequation.py (revision 19895)
@@ -0,0 +1,162 @@
+import numpy
+import copy
+from project3d import project3d
+from fielddisplay import fielddisplay
+from EnumDefinitions import *
+from StringToEnum import StringToEnum
+from checkfield import checkfield
+from WriteData import WriteData
+import MatlabFuncs as m
+
+class flowequation(object):
+    """
+    FLOWEQUATION class definition
+
+    Usage:
+        flowequation=flowequation();
+    """
+
+    def __init__(self):    # {{{
+
+        self.isSIA  = 0
+        self.isSSA  = 0
+        self.isL1L2 = 0
+        self.isHO   = 0
+        self.isFS   = 0
+        self.fe_SSA = ''
+        self.fe_HO  = ''
+        self.fe_FS  = ''
+        self.augmented_lagrangian_r         = 1.
+        self.augmented_lagrangian_rhop      = 1.
+        self.augmented_lagrangian_rlambda   = 1.
+        self.augmented_lagrangian_rholambda = 1.
+        self.XTH_theta                      = 0.
+        self.vertex_equation  = float('NaN')
+        self.element_equation = float('NaN')
+        self.borderSSA        = float('NaN')
+        self.borderHO         = float('NaN')
+        self.borderFS         = float('NaN')
+
+        #set defaults
+        self.setdefaultparameters()
+
+    #}}}
+    def __repr__(self):    # {{{
+        string='   flow equation parameters:'
+
+        string="%s\n%s"%(string,fielddisplay(self,'isSIA',"is the Shallow Ice Approximation (SIA) used?"))
+        string="%s\n%s"%(string,fielddisplay(self,'isSSA',"is the Shelfy-Stream Approximation (SSA) used?"))
+        string="%s\n%s"%(string,fielddisplay(self,'isL1L2',"are L1L2 equations used?"))
+        string="%s\n%s"%(string,fielddisplay(self,'isHO',"is the Higher-Order (HO) approximation used?"))
+        string="%s\n%s"%(string,fielddisplay(self,'isFS',"are the Full-Stokes (FS) equations used?"))
+        string="%s\n%s"%(string,fielddisplay(self,'fe_SSA',"Finite Element for SSA: 'P1', 'P1bubble', 'P1bubblecondensed', 'P2'"))
+        string="%s\n%s"%(string,fielddisplay(self,'fe_HO' ,"Finite Element for HO: 'P1', 'P1bubble', 'P1bubblecondensed', 'P1xP2', 'P2xP1', 'P2'"))
+        string="%s\n%s"%(string,fielddisplay(self,'fe_FS' ,"Finite Element for FS: 'P1P1' (debugging only), 'P1P1GLS', 'MINIcondensed', 'MINI', 'TaylorHood', 'LATaylorHood', 'XTaylorHood'"))
+        string="%s\n%s"%(string,fielddisplay(self,'vertex_equation',"flow equation for each vertex"))
+        string="%s\n%s"%(string,fielddisplay(self,'element_equation',"flow equation for each element"))
+        string="%s\n%s"%(string,fielddisplay(self,'borderSSA',"vertices on SSA's border (for tiling)"))
+        string="%s\n%s"%(string,fielddisplay(self,'borderHO',"vertices on HO's border (for tiling)"))
+        string="%s\n%s"%(string,fielddisplay(self,'borderFS',"vertices on FS' border (for tiling)"))
+        return string
+    #}}}
+    def extrude(self,md):    # {{{
+        self.element_equation=project3d(md,'vector',self.element_equation,'type','element')
+        self.vertex_equation=project3d(md,'vector',self.vertex_equation,'type','node')
+        self.borderSSA=project3d(md,'vector',self.borderSSA,'type','node')
+        self.borderHO=project3d(md,'vector',self.borderHO,'type','node')
+        self.borderFS=project3d(md,'vector',self.borderFS,'type','node')
+        return self
+    #}}}
+    def setdefaultparameters(self):    # {{{
+
+        #P1 for SSA
+        self.fe_SSA='P1'
+
+        #P1 for HO
+        self.fe_HO='P1'
+
+        #MINI condensed element for FS by default
+        self.fe_FS='MINIcondensed'
+
+        return self
+    #}}}
+    def checkconsistency(self,md,solution,analyses):    # {{{
+
+        #Early return
+        if (StressbalanceAnalysisEnum() not in analyses and StressbalanceSIAAnalysisEnum() not in analyses) or (solution==TransientSolutionEnum() and not md.transient.isstressbalance):
+            return md
+
+        md = checkfield(md,'fieldname','flowequation.isSIA','numel',[1],'values',[0,1])
+        md = checkfield(md,'fieldname','flowequation.isSSA','numel',[1],'values',[0,1])
+        md = checkfield(md,'fieldname','flowequation.isL1L2','numel',[1],'values',[0,1])
+        md = checkfield(md,'fieldname','flowequation.isHO','numel',[1],'values',[0,1])
+        md = checkfield(md,'fieldname','flowequation.isFS','numel',[1],'values',[0,1])
+        md = checkfield(md,'fieldname','flowequation.fe_SSA','values',['P1','P1bubble','P1bubblecondensed','P2','P2bubble'])
+        md = checkfield(md,'fieldname','flowequation.fe_HO' ,'values',['P1','P1bubble','P1bubblecondensed','P1xP2','P2xP1','P2','P2bubble','P1xP3','P2xP4'])
+        md = checkfield(md,'fieldname','flowequation.fe_FS' ,'values',['P1P1','P1P1GLS','MINIcondensed','MINI','TaylorHood','XTaylorHood','OneLayerP4z','CrouzeixRaviart'])
+        md = checkfield(md,'fieldname','flowequation.borderSSA','size',[md.mesh.numberofvertices],'values',[0,1])
+        md = checkfield(md,'fieldname','flowequation.borderHO','size',[md.mesh.numberofvertices],'values',[0,1])
+        md = checkfield(md,'fieldname','flowequation.borderFS','size',[md.mesh.numberofvertices],'values',[0,1])
+        md = checkfield(md,'fieldname','flowequation.augmented_lagrangian_r','numel',[1],'>',0.)
+        md = checkfield(md,'fieldname','flowequation.augmented_lagrangian_rhop','numel',[1],'>',0.)
+        md = checkfield(md,'fieldname','flowequation.augmented_lagrangian_rlambda','numel',[1],'>',0.)
+        md = checkfield(md,'fieldname','flowequation.augmented_lagrangian_rholambda','numel',[1],'>',0.)
+        md = checkfield(md,'fieldname','flowequation.XTH_theta','numel',[1],'>=',0.,'<',.5)
+        if m.strcmp(md.mesh.domaintype(),'2Dhorizontal'):
+            md = checkfield(md,'fieldname','flowequation.vertex_equation','size',[md.mesh.numberofvertices],'values',[1,2])
+            md = checkfield(md,'fieldname','flowequation.element_equation','size',[md.mesh.numberofelements],'values',[1,2])
+        elif m.strcmp(md.mesh.domaintype(),'3D'):
+            md = checkfield(md,'fieldname','flowequation.vertex_equation','size',[md.mesh.numberofvertices],'values',numpy.arange(0,8+1))
+            md = checkfield(md,'fieldname','flowequation.element_equation','size',[md.mesh.numberofelements],'values',numpy.arange(0,8+1))
+        else:
+            raise RuntimeError('mesh type not supported yet')
+        if not (self.isSIA or self.isSSA or self.isL1L2 or self.isHO or self.isFS):
+            md.checkmessage("no element types set for this model")
+
+        if StressbalanceSIAAnalysisEnum() in analyses:
+            if any(self.element_equation==1):
+                if numpy.any(numpy.logical_and(self.vertex_equation,md.mask.groundedice_levelset)):
+                    print("\n !!! Warning: SIA's model is not consistent on ice shelves !!!\n")
+
+        return md
+    # }}}
+    def marshall(self,md,fid):    # {{{
+        WriteData(fid,'object',self,'fieldname','isSIA','format','Boolean')
+        WriteData(fid,'object',self,'fieldname','isSSA','format','Boolean')
+        WriteData(fid,'object',self,'fieldname','isL1L2','format','Boolean')
+        WriteData(fid,'object',self,'fieldname','isHO','format','Boolean')
+        WriteData(fid,'object',self,'fieldname','isFS','format','Boolean')
+        WriteData(fid,'enum',FlowequationFeSSAEnum(),'data',StringToEnum(self.fe_SSA)[0],'format','Integer')
+        WriteData(fid,'enum',FlowequationFeHOEnum() ,'data',StringToEnum(self.fe_HO)[0] ,'format','Integer')
+        WriteData(fid,'enum',FlowequationFeFSEnum() ,'data',StringToEnum(self.fe_FS)[0] ,'format','Integer')
+        WriteData(fid,'enum',AugmentedLagrangianREnum(),'data',self.augmented_lagrangian_r,'format','Double')
+        WriteData(fid,'enum',AugmentedLagrangianRhopEnum(),'data',self.augmented_lagrangian_rhop,'format','Double')
+        WriteData(fid,'enum',AugmentedLagrangianRlambdaEnum(),'data',self.augmented_lagrangian_rlambda,'format','Double')
+        WriteData(fid,'enum',AugmentedLagrangianRholambdaEnum(),'data',self.augmented_lagrangian_rholambda,'format','Double')
+        WriteData(fid,'enum',AugmentedLagrangianThetaEnum(),'data',self.XTH_theta,'format','Double')
+        WriteData(fid,'object',self,'fieldname','borderSSA','format','DoubleMat','mattype',1)
+        WriteData(fid,'object',self,'fieldname','borderHO','format','DoubleMat','mattype',1)
+        WriteData(fid,'object',self,'fieldname','borderFS','format','DoubleMat','mattype',1)
+        #convert approximations to enums
+        data=copy.deepcopy(self.vertex_equation)
+        data[numpy.nonzero(data==0)]=NoneApproximationEnum()
+        data[numpy.nonzero(data==1)]=SIAApproximationEnum()
+        data[numpy.nonzero(data==2)]=SSAApproximationEnum()
+        data[numpy.nonzero(data==3)]=L1L2ApproximationEnum()
+        data[numpy.nonzero(data==4)]=HOApproximationEnum()
+        data[numpy.nonzero(data==5)]=FSApproximationEnum()
+        data[numpy.nonzero(data==6)]=SSAHOApproximationEnum()
+        data[numpy.nonzero(data==7)]=HOFSApproximationEnum()
+        data[numpy.nonzero(data==8)]=SSAFSApproximationEnum()
+        WriteData(fid,'data',data,'enum',FlowequationVertexEquationEnum(),'format','DoubleMat','mattype',1)
+        #codes 6-8 (couplings) use the same mapping for elements as for vertices
+        data=copy.deepcopy(self.element_equation)
+        data[numpy.nonzero(data==0)]=NoneApproximationEnum()
+        data[numpy.nonzero(data==1)]=SIAApproximationEnum()
+        data[numpy.nonzero(data==2)]=SSAApproximationEnum()
+        data[numpy.nonzero(data==3)]=L1L2ApproximationEnum()
+        data[numpy.nonzero(data==4)]=HOApproximationEnum()
+        data[numpy.nonzero(data==5)]=FSApproximationEnum()
+        data[numpy.nonzero(data==6)]=SSAHOApproximationEnum()
+        data[numpy.nonzero(data==7)]=HOFSApproximationEnum()
+        data[numpy.nonzero(data==8)]=SSAFSApproximationEnum()
+        WriteData(fid,'data',data,'enum',FlowequationElementEquationEnum(),'format','DoubleMat','mattype',2)
+    # }}}
Index: ../trunk-jpl/src/py3/classes/basalforcings.py
===================================================================
--- ../trunk-jpl/src/py3/classes/basalforcings.py (revision 0)
+++ ../trunk-jpl/src/py3/classes/basalforcings.py (revision 19895)
@@ -0,0 +1,79 @@
+from fielddisplay import fielddisplay
+from project3d import project3d
+from EnumDefinitions import *
+from checkfield import checkfield
+from WriteData import WriteData
+import numpy
+
+class basalforcings(object):
+    """
+    BASAL FORCINGS class definition
+
+    Usage:
+        basalforcings=basalforcings();
+    """
+
+    def __init__(self):    # {{{
+        self.groundedice_melting_rate = float('NaN')
+        self.floatingice_melting_rate = float('NaN')
+        self.geothermalflux           = float('NaN')
+
+        #set defaults
+        self.setdefaultparameters()
+
+    #}}}
+    def __repr__(self):    # {{{
+        string="   basal forcings parameters:"
+
+        string="%s\n%s"%(string,fielddisplay(self,"groundedice_melting_rate","basal melting rate (positive if melting) [m/yr]"))
+        string="%s\n%s"%(string,fielddisplay(self,"floatingice_melting_rate","basal melting rate (positive if melting) [m/yr]"))
+        string="%s\n%s"%(string,fielddisplay(self,"geothermalflux","geothermal heat flux [W/m^2]"))
+        return string
+    #}}}
+    def extrude(self,md):    # {{{
+        self.groundedice_melting_rate=project3d(md,'vector',self.groundedice_melting_rate,'type','node','layer',1)
+        self.floatingice_melting_rate=project3d(md,'vector',self.floatingice_melting_rate,'type','node','layer',1)
+        self.geothermalflux=project3d(md,'vector',self.geothermalflux,'type','node','layer',1)    #bedrock only gets geothermal flux
+        return self
+    #}}}
+    def initialize(self,md):    # {{{
+
+        if numpy.all(numpy.isnan(self.groundedice_melting_rate)):
+            self.groundedice_melting_rate=numpy.zeros((md.mesh.numberofvertices,1))
+            print("      no basalforcings.groundedice_melting_rate specified: values set as zero")
+
+        if numpy.all(numpy.isnan(self.floatingice_melting_rate)):
+            self.floatingice_melting_rate=numpy.zeros((md.mesh.numberofvertices,1))
+            print("      no basalforcings.floatingice_melting_rate specified: values set as zero")
+
+        return self
+    #}}}
+    def setdefaultparameters(self):    # {{{
+        return self
+    #}}}
+    def checkconsistency(self,md,solution,analyses):    # {{{
+
+        if MasstransportAnalysisEnum() in analyses and not (solution==TransientSolutionEnum() and not md.transient.ismasstransport):
+            md = checkfield(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'timeseries',1)
+            md = checkfield(md,'fieldname','basalforcings.floatingice_melting_rate','NaN',1,'timeseries',1)
+
+        if BalancethicknessAnalysisEnum() in analyses:
+            md = checkfield(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'size',[md.mesh.numberofvertices])
+            md = checkfield(md,'fieldname','basalforcings.floatingice_melting_rate','NaN',1,'size',[md.mesh.numberofvertices])
+
+        if ThermalAnalysisEnum() in analyses and not (solution==TransientSolutionEnum() and not md.transient.isthermal):
+            md = checkfield(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'timeseries',1)
+            md = checkfield(md,'fieldname','basalforcings.floatingice_melting_rate','NaN',1,'timeseries',1)
+            md = checkfield(md,'fieldname','basalforcings.geothermalflux','NaN',1,'timeseries',1,'>=',0)
+
+        return md
+    # }}}
+    def marshall(self,md,fid):    # {{{
+
+        yts=365.0*24.0*3600.0
+
+        WriteData(fid,'enum',BasalforcingsEnum(),'data',FloatingMeltRateEnum(),'format','Integer')
+        WriteData(fid,'object',self,'fieldname','groundedice_melting_rate','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1)
+        WriteData(fid,'object',self,'fieldname','floatingice_melting_rate','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1)
+        WriteData(fid,'object',self,'fieldname','geothermalflux','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1)
+    # }}}
Index: ../trunk-jpl/src/py3/classes/clusters/pfe.py
===================================================================
--- ../trunk-jpl/src/py3/classes/clusters/pfe.py (revision 0)
+++ ../trunk-jpl/src/py3/classes/clusters/pfe.py (revision 19895)
@@ -0,0 +1,199 @@
+# import socket
+# import os
+# import math
+import subprocess
+from IssmConfig import IssmConfig
+from fielddisplay import fielddisplay
+from EnumToString import EnumToString
+from pairoptions import pairoptions
+from issmssh import issmssh
+from issmscpin import issmscpin
+from issmscpout import issmscpout
+from QueueRequirements import QueueRequirements
+try:
+    from pfe_settings import pfe_settings
+except ImportError:
+    print('You need pfe_settings.py to proceed, check presence and sys.path')
+
+class pfe(object):
+    """
+    PFE cluster class definition
+
+    Usage:
+        cluster=pfe();
+        cluster=pfe('np',3);
+        cluster=pfe('np',3,'login','username');
+    """
+
+    def __init__(self,**kwargs):
+        # {{{
+
+        self.name           = 'pfe'
+        self.login          = ''
+        self.numnodes       = 20
+        self.cpuspernode    = 8
+        self.port           = 1025
+        self.queue          = 'long'
+        self.time           = 12*60
+        self.processor      = 'wes'
+        self.codepath       = ''
+        self.executionpath  = ''
+        self.grouplist      = 's1010'
+        self.interactive    = 0
+        self.bbftp          = 0
+        self.numstreams     = 8
+        self.hyperthreading = 0
+
+        #use provided options to change fields
+        options=pairoptions(**kwargs)
+
+        #initialize cluster using user settings if provided
+        self=pfe_settings(self)
+        self.np=self.nprocs()
+        #OK get other fields
+        self=options.AssignObjectFields(self)
+
+        # }}}
+
+    def __repr__(self):
+        # {{{
+        # display the object
+        s = "class pfe object:"
+        s = "%s\n%s"%(s,fielddisplay(self,'name','name of the cluster'))
+        s = "%s\n%s"%(s,fielddisplay(self,'login','login'))
+        s = "%s\n%s"%(s,fielddisplay(self,'numnodes','number of nodes'))
+        s = "%s\n%s"%(s,fielddisplay(self,'cpuspernode','number of CPUs per node'))
+        s = "%s\n%s"%(s,fielddisplay(self,'np','number of CPUs'))
+        s = "%s\n%s"%(s,fielddisplay(self,'port','machine access port'))
+        s = "%s\n%s"%(s,fielddisplay(self,'codepath','code path on the cluster'))
+        s = "%s\n%s"%(s,fielddisplay(self,'executionpath','execution path on the cluster'))
+        s = "%s\n%s"%(s,fielddisplay(self,'queue','name of the queue'))
+        s = "%s\n%s"%(s,fielddisplay(self,'time','walltime requested'))
+        s = "%s\n%s"%(s,fielddisplay(self,'processor','type of processor'))
+        s = "%s\n%s"%(s,fielddisplay(self,'grouplist','name of the group'))
+        s = "%s\n%s"%(s,fielddisplay(self,'interactive',''))
+        s = "%s\n%s"%(s,fielddisplay(self,'bbftp',''))
+        s = "%s\n%s"%(s,fielddisplay(self,'numstreams',''))
+        s = "%s\n%s"%(s,fielddisplay(self,'hyperthreading',''))
+        return s
+        # }}}
+
+    def nprocs(self):
+        # {{{
+        self.np=self.numnodes*self.cpuspernode
+        return self.np
+        # }}}
+    def checkconsistency(self,md,solution,analyses):
+        # {{{
+
+        queuedict = {'long':   [5*24*60, 2048],
+                     'normal': [8*60, 2048],
+                     'debug':  [2*60,150],
+                     'devel':  [2*60,150]}
+        QueueRequirements(queuedict,self.queue,self.nprocs(),self.time)
+
+        #now, check cluster.cpuspernode according to processor type
+        if self.processor=='har' or self.processor=='neh':
+            if self.hyperthreading:
+                if not 0<self.cpuspernode<=16:
+                    md = md.checkmessage('cpuspernode should be between 1 and 16 for har and neh processors in hyperthreading mode')
+            else:
+                if not 0<self.cpuspernode<=8:
+                    md = md.checkmessage('cpuspernode should be between 1 and 8 for har and neh processors')
+        elif self.processor=='wes':
+            if self.hyperthreading:
+                if not 0<self.cpuspernode<=24:
+                    md = md.checkmessage('cpuspernode should be between 1 and 24 for wes processors in hyperthreading mode')
+            else:
+                if not 0<self.cpuspernode<=12:
+                    md = md.checkmessage('cpuspernode should be between 1 and 12 for wes processors')
+        else:
+            md = md.checkmessage('unknown processor type, should be har, neh or wes')
+
+        return md
+        # }}}
+    def BuildQueueScript(self,dirname,modelname,solution,io_gather,isvalgrind,isgprof,isdakota):
+        # {{{
+
+        executable='issm.exe'
+        if isdakota:
+            version=IssmConfig('_DAKOTA_VERSION_')[0:2]
+            version=float(version)
+            if version>=6:
+                executable='issm_dakota.exe'
+
+        #write queuing script
+        fid=open(modelname+'.queue','w')
+        fid.write('#PBS -S /bin/bash\n')
+        fid.write('#PBS -l select=%i:ncpus=%i:model=%s\n' % (self.numnodes,self.cpuspernode,self.processor))
+        fid.write('#PBS -l walltime=%i\n' % (self.time*60))
+        fid.write('#PBS -q %s \n' % self.queue)
+        fid.write('#PBS -W group_list=%s\n' % self.grouplist)
+        fid.write('#PBS -m e\n')
+        fid.write('#PBS -o %s/%s/%s.outlog \n' % (self.executionpath,dirname,modelname))
+        fid.write('#PBS -e %s/%s/%s.errlog \n\n' % (self.executionpath,dirname,modelname))
+        fid.write('. /usr/share/modules/init/bash\n\n')
+        fid.write('module load comp-intel/2015.0.090\n')
+        fid.write('module load mpi-sgi/mpt.2.11r13\n')
+        fid.write('export PATH="$PATH:."\n\n')
+        fid.write('export MPI_GROUP_MAX=64\n\n')
+        fid.write('export ISSM_DIR="%s/../"\n' % self.codepath)
+        fid.write('source $ISSM_DIR/etc/environment.sh\n')
+        fid.write('cd %s/%s/\n\n' % (self.executionpath,dirname))
+        fid.write('mpiexec -np %i %s/%s %s %s/%s %s\n' % (self.nprocs(),self.codepath,executable,str(EnumToString(solution)[0]),self.executionpath,dirname,modelname))
+
+        fid.close()
+
+        # }}}
+    def UploadQueueJob(self,modelname,dirname,filelist):
+        # {{{
+
+        #compress the files into one tar.gz
+        compressstring='tar -zcf %s.tar.gz ' % dirname
+        for file in filelist:
+            compressstring += ' %s' % file
+        subprocess.call(compressstring,shell=True)
+
+        print('uploading input file and queueing script')
+        issmscpout(self.name,self.executionpath,self.login,self.port,[dirname+'.tar.gz'])
+
+        # }}}
+    def LaunchQueueJob(self,modelname,dirname,filelist,restart):
+        # {{{
+
+        print('launching solution sequence on remote cluster')
+        if restart:
+            launchcommand='cd %s && cd %s && qsub %s.queue' % (self.executionpath,dirname,modelname)
+        else:
+            launchcommand='cd %s && rm -rf ./%s && mkdir %s && cd %s && mv ../%s.tar.gz ./ && tar -zxf %s.tar.gz && qsub %s.queue' % (self.executionpath,dirname,dirname,dirname,dirname,dirname,modelname)
+        issmssh(self.name,self.login,self.port,launchcommand)
+
+        # }}}
+    def Download(self,dirname,filelist):
+        # {{{
+
+        #copy files from cluster to current directory
+        directory='%s/%s/' % (self.executionpath,dirname)
+        issmscpin(self.name,self.login,self.port,directory,filelist)
+        # }}}
Index: ../trunk-jpl/src/py3/classes/clusters/generic.py
===================================================================
--- ../trunk-jpl/src/py3/classes/clusters/generic.py (revision 0)
+++ ../trunk-jpl/src/py3/classes/clusters/generic.py (revision 19895)
@@ -0,0 +1,204 @@
+import socket
+import os
+import math
+import subprocess
+from IssmConfig import IssmConfig
+from EnumToString import EnumToString
+from issmdir import issmdir
+from pairoptions import pairoptions
+from issmssh import issmssh
+from issmscpin import issmscpin
+from issmscpout import issmscpout
+import MatlabFuncs as m
+
+class generic(object):
+    """
+    GENERIC cluster class definition
+
+    Usage:
+        cluster=generic('name','astrid','np',3);
+        cluster=generic('name',oshostname(),'np',3,'login','username');
+    """
+
+    def __init__(self,**kwargs):    # {{{
+
+        self.name=''
+        self.login=''
+        self.np=1
+        self.port=0
+        self.interactive=1
+        self.codepath=issmdir()+'/bin'
+        self.executionpath=issmdir()+'/execution'
+        self.valgrind=issmdir()+'/externalpackages/valgrind/install/bin/valgrind'
+        self.valgrindlib=issmdir()+'/externalpackages/valgrind/install/lib/libmpidebug.so'
+        self.valgrindsup=issmdir()+'/externalpackages/valgrind/issm.supp'
+
+        #use provided options to change fields
+        options=pairoptions(**kwargs)
+
+        #get name
+        self.name=socket.gethostname()
+
+        #initialize cluster using user settings if provided
+        if os.path.exists(self.name+'_settings.py'):
+            exec(compile(open(self.name+'_settings.py').read(), self.name+'_settings.py', 'exec'),globals())
+
+        #OK get other fields
+        self=options.AssignObjectFields(self)
+    # }}}
+    def __repr__(self):    # {{{
+        # display the object
+        s ="class '%s' object '%s' = \n" % (type(self),'self')
+        s+="    name: %s\n" % self.name
+        s+="    login: %s\n" % self.login
+        s+="    np: %i\n" % self.np
+        s+="    port: %i\n" % self.port
+        s+="    codepath: %s\n" % self.codepath
+        s+="    executionpath: %s\n" % self.executionpath
+        s+="    valgrind: %s\n" % self.valgrind
+        s+="    valgrindlib: %s\n" % self.valgrindlib
+        s+="    valgrindsup: %s\n" % self.valgrindsup
+        return s
+    # }}}
+    def checkconsistency(self,md,solution,analyses):    # {{{
+        if self.np<1:
+            md = md.checkmessage('number of processors should be at least 1')
+        if math.isnan(self.np):
+            md = md.checkmessage('number of processors should not be NaN!')
+
+        return md
+    # }}}
+    def BuildQueueScript(self,dirname,modelname,solution,io_gather,isvalgrind,isgprof,isdakota):    # {{{
+
+        executable='issm.exe'
+        if isdakota:
+            version=IssmConfig('_DAKOTA_VERSION_')[0:2]
+            version=float(version)
+            if version>=6:
+                executable='issm_dakota.exe'
+
+        #write queuing script
+        if not m.ispc():
+
+            fid=open(modelname+'.queue','w')
+            fid.write('#!/bin/sh\n')
+            if not isvalgrind:
+                if self.interactive:
+                    if IssmConfig('_HAVE_MPI_')[0]:
+                        fid.write('mpiexec -np %i %s/%s %s %s/%s %s ' % (self.np,self.codepath,executable,EnumToString(solution)[0],self.executionpath,dirname,modelname))
+                    else:
+                        fid.write('%s/%s %s %s/%s %s ' % (self.codepath,executable,EnumToString(solution)[0],self.executionpath,dirname,modelname))
+                else:
+                    if IssmConfig('_HAVE_MPI_')[0]:
+                        fid.write('mpiexec -np %i %s/%s %s %s/%s %s 2> %s.errlog >%s.outlog ' % (self.np,self.codepath,executable,EnumToString(solution)[0],self.executionpath,dirname,modelname,modelname,modelname))
+                    else:
+                        fid.write('%s/%s %s %s/%s %s 2> %s.errlog >%s.outlog ' % (self.codepath,executable,EnumToString(solution)[0],self.executionpath,dirname,modelname,modelname,modelname))
+            elif isgprof:
+                fid.write('\n gprof %s/%s gmon.out > %s.performance' % (self.codepath,executable,modelname))
+            else:
+                #Add --gen-suppressions=all to get suppression lines
+                fid.write('LD_PRELOAD=%s \\\n' % self.valgrindlib)
+                if IssmConfig('_HAVE_MPI_')[0]:
+                    fid.write('mpiexec -np %i %s --leak-check=full --suppressions=%s %s/%s %s %s/%s %s 2> %s.errlog >%s.outlog ' % \
+                        (self.np,self.valgrind,self.valgrindsup,self.codepath,executable,EnumToString(solution)[0],self.executionpath,dirname,modelname,modelname,modelname))
+                else:
+                    fid.write('%s --leak-check=full --suppressions=%s %s/%s %s %s/%s %s 2> %s.errlog >%s.outlog ' % \
+                        (self.valgrind,self.valgrindsup,self.codepath,executable,EnumToString(solution)[0],self.executionpath,dirname,modelname,modelname,modelname))
+
+            if not io_gather:    #concatenate the output files:
+                fid.write('\ncat %s.outbin.* > %s.outbin' % (modelname,modelname))
+            fid.close()
+
+        else:    # Windows
+
+            fid=open(modelname+'.bat','w')
+            fid.write('@echo off\n')
+            if self.interactive:
+                fid.write('"%s/%s" %s "%s/%s" %s ' % (self.codepath,executable,EnumToString(solution)[0],self.executionpath,dirname,modelname))
+            else:
+                fid.write('"%s/%s" %s "%s/%s" %s 2> %s.errlog >%s.outlog' % \
+                    (self.codepath,executable,EnumToString(solution)[0],self.executionpath,dirname,modelname,modelname,modelname))
+            fid.close()
+
+        #in interactive mode, create a run file, and errlog and outlog file
+        if self.interactive:
+            fid=open(modelname+'.errlog','w')
+            fid.close()
+            fid=open(modelname+'.outlog','w')
+            fid.close()
+    # }}}
+    def BuildKrigingQueueScript(self,modelname,solution,io_gather,isvalgrind,isgprof):    # {{{
+
+        #write queuing script
+        if not m.ispc():
+
+            fid=open(modelname+'.queue','w')
+            fid.write('#!/bin/sh\n')
+            if not isvalgrind:
+                if self.interactive:
+                    fid.write('mpiexec -np %i %s/kriging.exe %s/%s %s ' % (self.np,self.codepath,self.executionpath,modelname,modelname))
+                else:
+                    fid.write('mpiexec -np %i %s/kriging.exe %s/%s %s 2> %s.errlog >%s.outlog ' % (self.np,self.codepath,self.executionpath,modelname,modelname,modelname,modelname))
+            elif isgprof:
+                fid.write('\n gprof %s/kriging.exe gmon.out > %s.performance' % (self.codepath,modelname))
+            else:
+                #Add --gen-suppressions=all to get suppression lines
+                fid.write('LD_PRELOAD=%s \\\n' % self.valgrindlib)
+                fid.write('mpiexec -np %i %s --leak-check=full --suppressions=%s %s/kriging.exe %s/%s %s 2> %s.errlog >%s.outlog ' % \
+                    (self.np,self.valgrind,self.valgrindsup,self.codepath,self.executionpath,modelname,modelname,modelname,modelname))
+            if not io_gather:    #concatenate the output files:
+                fid.write('\ncat %s.outbin.* > %s.outbin' % (modelname,modelname))
+            fid.close()
+
+        else:    # Windows
+
+            fid=open(modelname+'.bat','w')
+            fid.write('@echo off\n')
+            if self.interactive:
+                fid.write('"%s/issm.exe" %s "%s/%s" %s ' % (self.codepath,EnumToString(solution)[0],self.executionpath,modelname,modelname))
+            else:
+                fid.write('"%s/issm.exe" %s "%s/%s" %s 2> %s.errlog >%s.outlog' % \
+                    (self.codepath,EnumToString(solution)[0],self.executionpath,modelname,modelname,modelname,modelname))
+            fid.close()
+
+        #in interactive mode, create a run file, and errlog and outlog file
+        if self.interactive:
+            fid=open(modelname+'.errlog','w')
+            fid.close()
+            fid=open(modelname+'.outlog','w')
+            fid.close()
+    # }}}
+    def UploadQueueJob(self,modelname,dirname,filelist):    # {{{
+
+        #compress the files into one tar.gz
+        compressstring='tar -zcf %s.tar.gz ' % dirname
+        for file in filelist:
+            compressstring += ' %s' % file
+        if self.interactive:
+            compressstring += ' %s.errlog %s.outlog ' % (modelname,modelname)
+        subprocess.call(compressstring,shell=True)
+
+        print('uploading input file and queueing script')
+        issmscpout(self.name,self.executionpath,self.login,self.port,[dirname+'.tar.gz'])
+
+    # }}}
+    def LaunchQueueJob(self,modelname,dirname,filelist,restart):    # {{{
+
+        print('launching solution sequence on remote cluster')
+        if restart:
+            launchcommand='cd %s && cd %s && chmod 777 %s.queue && ./%s.queue' % (self.executionpath,dirname,modelname,modelname)
+        else:
+            launchcommand='cd %s && rm -rf ./%s && mkdir %s && cd %s && mv ../%s.tar.gz ./ && tar -zxf %s.tar.gz && chmod 777 %s.queue && ./%s.queue' % \
+                (self.executionpath,dirname,dirname,dirname,dirname,dirname,modelname,modelname)
+        issmssh(self.name,self.login,self.port,launchcommand)
+    # }}}
+    def Download(self,dirname,filelist):    # {{{
+
+        if m.ispc():
+            #do nothing
+            return
+
+        #copy files from cluster to current directory
+        directory='%s/%s/' % (self.executionpath,dirname)
+        issmscpin(self.name,self.login,self.port,directory,filelist)
+    # }}}
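
For a hypothetical interactive MPI run on two processors, the .queue file written by BuildQueueScript() above would contain (paths and solution name illustrative only):

    #!/bin/sh
    mpiexec -np 2 /usr/local/issm/bin/issm.exe StressbalanceSolution /usr/local/issm/execution/run run 
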
data[numpy.nonzero(data==2)]=SSAApproximationEnum() + data[numpy.nonzero(data==3)]=L1L2ApproximationEnum() + data[numpy.nonzero(data==4)]=HOApproximationEnum() + data[numpy.nonzero(data==5)]=FSApproximationEnum() + data[numpy.nonzero(data==6)]=SSAHOApproximationEnum() + data[numpy.nonzero(data==7)]=HOFSApproximationEnum() + data[numpy.nonzero(data==8)]=SSAFSApproximationEnum() + WriteData(fid,'data',data,'enum',FlowequationVertexEquationEnum(),'format','DoubleMat','mattype',1) + data=copy.deepcopy(self.element_equation) + data[numpy.nonzero(data==0)]=NoneApproximationEnum() + data[numpy.nonzero(data==1)]=SIAApproximationEnum() + data[numpy.nonzero(data==2)]=SSAApproximationEnum() + data[numpy.nonzero(data==3)]=L1L2ApproximationEnum() + data[numpy.nonzero(data==4)]=HOApproximationEnum() + data[numpy.nonzero(data==5)]=FSApproximationEnum() + data[numpy.nonzero(data==6)]=SSAHOApproximationEnum() + data[numpy.nonzero(data==7)]=SSAFSApproximationEnum() + data[numpy.nonzero(data==8)]=HOFSApproximationEnum() + WriteData(fid,'data',data,'enum',FlowequationElementEquationEnum(),'format','DoubleMat','mattype',2) + # }}} Index: ../trunk-jpl/src/py3/classes/basalforcings.py =================================================================== --- ../trunk-jpl/src/py3/classes/basalforcings.py (revision 0) +++ ../trunk-jpl/src/py3/classes/basalforcings.py (revision 19895) @@ -0,0 +1,79 @@ +from fielddisplay import fielddisplay +from project3d import project3d +from EnumDefinitions import * +from checkfield import checkfield +from WriteData import WriteData +import numpy + +class basalforcings(object): + """ + BASAL FORCINGS class definition + + Usage: + basalforcings=basalforcings(); + """ + + def __init__(self): # {{{ + self.groundedice_melting_rate = float('NaN') + self.floatingice_melting_rate = float('NaN') + self.geothermalflux = float('NaN') + + #set defaults + self.setdefaultparameters() + + #}}} + def __repr__(self): # {{{ + string=" basal forcings parameters:" + + string="%s\n%s"%(string,fielddisplay(self,"groundedice_melting_rate","basal melting rate (positive if melting) [m/yr]")) + string="%s\n%s"%(string,fielddisplay(self,"floatingice_melting_rate","basal melting rate (positive if melting) [m/yr]")) + string="%s\n%s"%(string,fielddisplay(self,"geothermalflux","geothermal heat flux [W/m^2]")) + return string + #}}} + def extrude(self,md): # {{{ + self.groundedice_melting_rate=project3d(md,'vector',self.groundedice_melting_rate,'type','node','layer',1) + self.floatingice_melting_rate=project3d(md,'vector',self.floatingice_melting_rate,'type','node','layer',1) + self.geothermalflux=project3d(md,'vector',self.geothermalflux,'type','node','layer',1) #bedrock only gets geothermal flux + return self + #}}} + def initialize(self,md): # {{{ + + if numpy.all(numpy.isnan(self.groundedice_melting_rate)): + self.groundedice_melting_rate=numpy.zeros((md.mesh.numberofvertices,1)) + print(" no basalforcings.groundedice_melting_rate specified: values set as zero") + + if numpy.all(numpy.isnan(self.floatingice_melting_rate)): + self.floatingice_melting_rate=numpy.zeros((md.mesh.numberofvertices,1)) + print(" no basalforcings.floatingice_melting_rate specified: values set as zero") + + return self + #}}} + def setdefaultparameters(self): # {{{ + return self + #}}} + def checkconsistency(self,md,solution,analyses): # {{{ + + if MasstransportAnalysisEnum() in analyses and not (solution==TransientSolutionEnum() and not md.transient.ismasstransport): + md = 
checkfield(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'timeseries',1) + md = checkfield(md,'fieldname','basalforcings.floatingice_melting_rate','NaN',1,'timeseries',1) + + if BalancethicknessAnalysisEnum() in analyses: + md = checkfield(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'size',[md.mesh.numberofvertices]) + md = checkfield(md,'fieldname','basalforcings.floatingice_melting_rate','NaN',1,'size',[md.mesh.numberofvertices]) + + if ThermalAnalysisEnum() in analyses and not (solution==TransientSolutionEnum() and not md.transient.isthermal): + md = checkfield(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'timeseries',1) + md = checkfield(md,'fieldname','basalforcings.floatingice_melting_rate','NaN',1,'timeseries',1) + md = checkfield(md,'fieldname','basalforcings.geothermalflux','NaN',1,'timeseries',1,'>=',0) + + return md + # }}} + def marshall(self,md,fid): # {{{ + + yts=365.0*24.0*3600.0 + + WriteData(fid,'enum',BasalforcingsEnum(),'data',FloatingMeltRateEnum(),'format','Integer'); + WriteData(fid,'object',self,'fieldname','groundedice_melting_rate','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1) + WriteData(fid,'object',self,'fieldname','floatingice_melting_rate','format','DoubleMat','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1) + WriteData(fid,'object',self,'fieldname','geothermalflux','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1) + # }}} Index: ../trunk-jpl/src/py3/classes/clusters/pfe.py =================================================================== --- ../trunk-jpl/src/py3/classes/clusters/pfe.py (revision 0) +++ ../trunk-jpl/src/py3/classes/clusters/pfe.py (revision 19895) @@ -0,0 +1,199 @@ +# import socket +# import os +# import math +import subprocess +from fielddisplay import fielddisplay +from EnumToString import EnumToString +from pairoptions import pairoptions +from issmssh import issmssh +from issmscpin import issmscpin +from issmscpout import issmscpout +from QueueRequirements import QueueRequirements +try: + from pfe_settings import pfe_settings +except ImportError: + print('You need pfe_settings.py to proceed, check presence and sys.path') + +class pfe(object): + """ + PFE cluster class definition + + Usage: + cluster=pfe(); + cluster=pfe('np',3); + cluster=pfe('np',3,'login','username'); + """ + + def __init__(self,**kwargs): + # {{{ + + self.name = 'pfe' + self.login = '' + self.numnodes = 20 + self.cpuspernode = 8 + self.port = 1025 + self.queue = 'long' + self.time = 12*60 + self.processor = 'wes' + self.codepath = '' + self.executionpath = '' + self.grouplist = 's1010' + self.interactive = 0 + self.bbftp = 0 + self.numstreams = 8 + self.hyperthreading = 0 + + #use provided options to change fields + options=pairoptions(**kwargs) + + #initialize cluster using user settings if provided + self=pfe_settings(self) + self.np=self.nprocs() + #OK get other fields + self=options.AssignObjectFields(self) + + # }}} + + def __repr__(self): + # {{{ + # display the object + s = "class pfe object:" + s = "%s\n%s"%(s,fielddisplay(self,'name','name of the cluster')) + s = "%s\n%s"%(s,fielddisplay(self,'login','login')) + s = "%s\n%s"%(s,fielddisplay(self,'numnodes','number of nodes')) + s = "%s\n%s"%(s,fielddisplay(self,'cpuspernode','number of nodes per CPUs')) + s = "%s\n%s"%(s,fielddisplay(self,'np','number of CPUs')) + s = "%s\n%s"%(s,fielddisplay(self,'port','machine access port')) + s = 
"%s\n%s"%(s,fielddisplay(self,'codepath','code path on the cluster')) + s = "%s\n%s"%(s,fielddisplay(self,'executionpath','execution path on the cluster')) + s = "%s\n%s"%(s,fielddisplay(self,'queue','name of the queue')) + s = "%s\n%s"%(s,fielddisplay(self,'time','walltime requested')) + s = "%s\n%s"%(s,fielddisplay(self,'processor','type of processor')) + s = "%s\n%s"%(s,fielddisplay(self,'grouplist','name of the group')) + s = "%s\n%s"%(s,fielddisplay(self,'interactive','')) + s = "%s\n%s"%(s,fielddisplay(self,'bbftp','')) + s = "%s\n%s"%(s,fielddisplay(self,'numstreams','')) + s = "%s\n%s"%(s,fielddisplay(self,'hyperthreading','')) + return s + # }}} + + def nprocs(self): + # {{{ + self.np=self.numnodes*self.cpuspernode + return self.np + # }}} + def checkconsistency(self,md,solution,analyses): + # {{{ + + + queuedict = {'long': [5*24*60, 2048], + 'normal': [8*60, 2048], + 'debug':[2*60,150], + 'devel':[2*60,150]} + QueueRequirements(queuedict,self.queue,self.nprocs(),self.time) + + #now, check cluster.cpuspernode according to processor type + if self.processor=='har' or self.processor=='neh': + if self.hyperthreading: + if not 0=6: + executable='issm_dakota.exe' + + #write queuing script + fid=open(modelname+'.queue','w') + fid.write('#PBS -S /bin/bash\n') + fid.write('#PBS -l select=%i:ncpus=%i:model=%s\n' % (self.numnodes,self.cpuspernode,self.processor)) + fid.write('#PBS -l walltime=%i\n' % (self.time*60)) + fid.write('#PBS -q %s \n' % self.queue) + fid.write('#PBS -W group_list=%s\n' % self.grouplist) + fid.write('#PBS -m e\n') + fid.write('#PBS -o %s/%s/%s.outlog \n' % (self.executionpath,dirname,modelname)) + fid.write('#PBS -e %s/%s/%s.errlog \n\n' % (self.executionpath,dirname,modelname)) + fid.write('. /usr/share/modules/init/bash\n\n') + fid.write('module load comp-intel/2015.0.090\n') + fid.write('module load mpi-sgi/mpt.2.11r13\n') + fid.write('export PATH="$PATH:."\n\n') + fid.write('export MPI_GROUP_MAX=64\n\n') + fid.write('export ISSM_DIR="%s/../"\n' % self.codepath) + fid.write('source $ISSM_DIR/etc/environment.sh\n') + fid.write('cd %s/%s/\n\n' % (self.executionpath,dirname)) + fid.write('mpiexec -np %i %s/%s %s %s/%s %s\n' % (self.nprocs(),self.codepath,executable,str(EnumToString(solution)[0]),self.executionpath,dirname,modelname)) + + fid.close() + + # }}} + def UploadQueueJob(self,modelname,dirname,filelist): + # {{{ + + #compress the files into one zip. 
+ compressstring='tar -zcf %s.tar.gz ' % dirname + for file in filelist: + compressstring += ' %s' % file + subprocess.call(compressstring,shell=True) + + print('uploading input file and queueing script') + issmscpout(self.name,self.executionpath,self.login,self.port,[dirname+'.tar.gz']) + + # }}} + def LaunchQueueJob(self,modelname,dirname,filelist,restart): + # {{{ + + print('launching solution sequence on remote cluster') + if restart: + launchcommand='cd %s && cd %s && qsub %s.queue' % (self.executionpath,dirname,modelname) + else: + launchcommand='cd %s && rm -rf ./%s && mkdir %s && cd %s && mv ../%s.tar.gz ./ && tar -zxf %s.tar.gz && qsub %s.queue' % (self.executionpath,dirname,dirname,dirname,dirname,dirname,modelname) + issmssh(self.name,self.login,self.port,launchcommand) + + # }}} + def Download(self,dirname,filelist): + # {{{ + + #copy files from cluster to current directory + directory='%s/%s/' % (self.executionpath,dirname) + issmscpin(self.name,self.login,self.port,directory,filelist) + # }}} Index: ../trunk-jpl/src/py3/classes/clusters/generic.py =================================================================== --- ../trunk-jpl/src/py3/classes/clusters/generic.py (revision 0) +++ ../trunk-jpl/src/py3/classes/clusters/generic.py (revision 19895) @@ -0,0 +1,204 @@ +import socket +import os +import math +import subprocess +from IssmConfig import IssmConfig +from EnumToString import EnumToString +from issmdir import issmdir +from pairoptions import pairoptions +from issmssh import issmssh +from issmscpin import issmscpin +from issmscpout import issmscpout +import MatlabFuncs as m + +class generic(object): + """ + GENERIC cluster class definition + + Usage: + cluster=generic('name','astrid','np',3); + cluster=generic('name',oshostname(),'np',3,'login','username'); + """ + + def __init__(self,**kwargs): # {{{ + + self.name='' + self.login='' + self.np=1 + self.port=0 + self.interactive=1 + self.codepath=issmdir()+'/bin' + self.executionpath=issmdir()+'/execution' + self.valgrind=issmdir()+'/externalpackages/valgrind/install/bin/valgrind' + self.valgrindlib=issmdir()+'/externalpackages/valgrind/install/lib/libmpidebug.so' + self.valgrindsup=issmdir()+'/externalpackages/valgrind/issm.supp' + + #use provided options to change fields + options=pairoptions(**kwargs) + + #get name + self.name=socket.gethostname() + + #initialize cluster using user settings if provided + if os.path.exists(self.name+'_settings.py'): + exec(compile(open(self.name+'_settings.py').read(), self.name+'_settings.py', 'exec'),globals()) + + #OK get other fields + self=options.AssignObjectFields(self) + # }}} + def __repr__(self): # {{{ + # display the object + s ="class '%s' object '%s' = \n" % (type(self),'self') + s+=" name: %s\n" % self.name + s+=" login: %s\n" % self.login + s+=" np: %i\n" % self.np + s+=" port: %i\n" % self.port + s+=" codepath: %s\n" % self.codepath + s+=" executionpath: %s\n" % self.executionpath + s+=" valgrind: %s\n" % self.valgrind + s+=" valgrindlib: %s\n" % self.valgrindlib + s+=" valgrindsup: %s\n" % self.valgrindsup + return s + # }}} + def checkconsistency(self,md,solution,analyses): # {{{ + if self.np<1: + md = checkmessage(md,'number of processors should be at least 1') + if math.isnan(self.np): + md = checkmessage(md,'number of processors should not be NaN!') + + return md + # }}} + def BuildQueueScript(self,dirname,modelname,solution,io_gather,isvalgrind,isgprof,isdakota): # {{{ + + executable='issm.exe'; + if isdakota: + version=IssmConfig('_DAKOTA_VERSION_')[0:2] + 
version=float(version) + if version>=6: + executable='issm_dakota.exe' + + #write queuing script + if not m.ispc(): + + fid=open(modelname+'.queue','w') + fid.write('#!/bin/sh\n') + if not isvalgrind: + if self.interactive: + if IssmConfig('_HAVE_MPI_')[0]: + fid.write('mpiexec -np %i %s/%s %s %s/%s %s ' % (self.np,self.codepath,executable,EnumToString(solution)[0],self.executionpath,dirname,modelname)) + else: + fid.write('%s/%s %s %s/%s %s ' % (self.codepath,executable,EnumToString(solution)[0],self.executionpath,dirname,modelname)) + else: + if IssmConfig('_HAVE_MPI_')[0]: + fid.write('mpiexec -np %i %s/%s %s %s/%s %s 2> %s.errlog >%s.outlog ' % (self.np,self.codepath,executable,EnumToString(solution)[0],self.executionpath,dirname,modelname,modelname,modelname)) + else: + fid.write('%s/%s %s %s/%s %s 2> %s.errlog >%s.outlog ' % (self.codepath,executable,EnumToString(solution)[0],self.executionpath,dirname,modelname,modelname,modelname)) + elif isgprof: + fid.write('\n gprof %s/%s gmon.out > %s.performance' % (self.codepath,executable,modelname)) + else: + #Add --gen-suppressions=all to get suppression lines + fid.write('LD_PRELOAD=%s \\\n' % self.valgrindlib) + if IssmConfig('_HAVE_MPI_')[0]: + fid.write('mpiexec -np %i %s --leak-check=full --suppressions=%s %s/%s %s %s/%s %s 2> %s.errlog >%s.outlog ' % \ + (self.np,self.valgrind,self.valgrindsup,self.codepath,executable,EnumToString(solution)[0],self.executionpath,dirname,modelname,modelname,modelname)) + else: + fid.write('%s --leak-check=full --suppressions=%s %s/%s %s %s/%s %s 2> %s.errlog >%s.outlog ' % \ + (self.valgrind,self.valgrindsup,self.codepath,executable,EnumToString(solution)[0],self.executionpath,dirname,modelname,modelname,modelname)) + + if not io_gather: #concatenate the output files: + fid.write('\ncat %s.outbin.* > %s.outbin' % (modelname,modelname)) + fid.close() + + else: # Windows + + fid=open(modelname+'.bat','w') + fid.write('@echo off\n') + if self.interactive: + fid.write('"%s/%s" %s "%s/%s" %s ' % (self.codepath,executable,EnumToString(solution)[0],self.executionpath,dirname,modelname)) + else: + fid.write('"%s/%s" %s "%s/%s" %s 2> %s.errlog >%s.outlog' % \ + (self.codepath,executable,EnumToString(solution)[0],self.executionpath,dirname,modelname,modelname,modelname)) + fid.close() + + #in interactive mode, create a run file, and errlog and outlog file + if self.interactive: + fid=open(modelname+'.errlog','w') + fid.close() + fid=open(modelname+'.outlog','w') + fid.close() + # }}} + def BuildKrigingQueueScript(self,modelname,solution,io_gather,isvalgrind,isgprof): # {{{ + + #write queuing script + if not m.ispc(): + + fid=open(modelname+'.queue','w') + fid.write('#!/bin/sh\n') + if not isvalgrind: + if self.interactive: + fid.write('mpiexec -np %i %s/kriging.exe %s/%s %s ' % (self.np,self.codepath,self.executionpath,modelname,modelname)) + else: + fid.write('mpiexec -np %i %s/kriging.exe %s/%s %s 2> %s.errlog >%s.outlog ' % (self.np,self.codepath,self.executionpath,modelname,modelname,modelname,modelname)) + elif isgprof: + fid.write('\n gprof %s/kriging.exe gmon.out > %s.performance' & (self.codepath,modelname)) + else: + #Add --gen-suppressions=all to get suppression lines + fid.write('LD_PRELOAD=%s \\\n' % self.valgrindlib) + fid.write('mpiexec -np %i %s --leak-check=full --suppressions=%s %s/kriging.exe %s/%s %s 2> %s.errlog >%s.outlog ' % \ + (self.np,self.valgrind,self.valgrindsup,self.codepath,self.executionpath,modelname,modelname,modelname,modelname)) + if not io_gather: #concatenate the output 
files: + fid.write('\ncat %s.outbin.* > %s.outbin' % (modelname,modelname)) + fid.close() + + else: # Windows + + fid=open(modelname+'.bat','w') + fid.write('@echo off\n') + if self.interactive: + fid.write('"%s/issm.exe" %s "%s/%s" %s ' % (self.codepath,EnumToString(solution)[0],self.executionpath,modelname,modelname)) + else: + fid.write('"%s/issm.exe" %s "%s/%s" %s 2> %s.errlog >%s.outlog' % \ + (self.codepath,EnumToString(solution)[0],self.executionpath,modelname,modelname,modelname,modelname)) + fid.close() + + #in interactive mode, create a run file, and errlog and outlog file + if self.interactive: + fid=open(modelname+'.errlog','w') + fid.close() + fid=open(modelname+'.outlog','w') + fid.close() + # }}} + def UploadQueueJob(self,modelname,dirname,filelist): # {{{ + + #compress the files into one tar.gz archive. + compressstring='tar -zcf %s.tar.gz ' % dirname + for file in filelist: + compressstring += ' %s' % file + if self.interactive: + compressstring += ' %s.errlog %s.outlog ' % (modelname,modelname) + subprocess.call(compressstring,shell=True) + + print('uploading input file and queueing script') + issmscpout(self.name,self.executionpath,self.login,self.port,[dirname+'.tar.gz']) + + # }}} + def LaunchQueueJob(self,modelname,dirname,filelist,restart): # {{{ + + print('launching solution sequence on remote cluster') + if restart: + launchcommand='cd %s && cd %s && chmod 777 %s.queue && ./%s.queue' % (self.executionpath,dirname,modelname,modelname) + else: + launchcommand='cd %s && rm -rf ./%s && mkdir %s && cd %s && mv ../%s.tar.gz ./ && tar -zxf %s.tar.gz && chmod 777 %s.queue && ./%s.queue' % \ + (self.executionpath,dirname,dirname,dirname,dirname,dirname,modelname,modelname) + issmssh(self.name,self.login,self.port,launchcommand) + # }}} + def Download(self,dirname,filelist): # {{{ + + if m.ispc(): + #do nothing + return + + #copy files from cluster to current directory + directory='%s/%s/' % (self.executionpath,dirname) + issmscpin(self.name,self.login,self.port,directory,filelist) + # }}} Index: ../trunk-jpl/src/py3/classes/mesh2d.py =================================================================== --- ../trunk-jpl/src/py3/classes/mesh2d.py (revision 0) +++ ../trunk-jpl/src/py3/classes/mesh2d.py (revision 19895) @@ -0,0 +1,122 @@ +import numpy +from fielddisplay import fielddisplay +from checkfield import checkfield +from WriteData import WriteData +from StringToEnum import StringToEnum +from EnumDefinitions import * +import MatlabFuncs as m + +class mesh2d(object): + """ + MESH2D class definition + + Usage: + mesh2d=mesh2d(); + """ + + def __init__(self): # {{{ + self.x = float('NaN'); + self.y = float('NaN'); + self.elements = float('NaN'); + self.numberofelements = 0; + self.numberofvertices = 0; + self.numberofedges = 0; + + self.lat = float('NaN'); + self.long = float('NaN'); + self.epsg = 0; + + self.vertexonboundary = float('NaN'); + self.edges = float('NaN'); + self.segments = float('NaN'); + self.segmentmarkers = float('NaN'); + self.vertexconnectivity = float('NaN'); + self.elementconnectivity = float('NaN'); + self.average_vertex_connectivity = 0; + + self.extractedvertices = float('NaN'); + self.extractedelements = float('NaN'); + + #set defaults + self.setdefaultparameters() + + #}}} + def __repr__(self): # {{{ + string=" 2D tria Mesh (horizontal):" + + string="%s\n%s"%(string,"\n Elements and vertices:") + string="%s\n%s"%(string,fielddisplay(self,"numberofelements","number of elements")) + string="%s\n%s"%(string,fielddisplay(self,"numberofvertices","number of vertices")) + 
string="%s\n%s"%(string,fielddisplay(self,"elements","vertex indices of the mesh elements")) + string="%s\n%s"%(string,fielddisplay(self,"x","vertices x coordinate [m]")) + string="%s\n%s"%(string,fielddisplay(self,"y","vertices y coordinate [m]")) + string="%s\n%s"%(string,fielddisplay(self,"edges","edges of the 2d mesh (vertex1 vertex2 element1 element2)")) + string="%s\n%s"%(string,fielddisplay(self,"numberofedges","number of edges of the 2d mesh")) + + string="%s%s"%(string,"\n\n Properties:") + string="%s\n%s"%(string,fielddisplay(self,"vertexonboundary","vertices on the boundary of the domain flag list")) + string="%s\n%s"%(string,fielddisplay(self,"segments","edges on domain boundary (vertex1 vertex2 element)")) + string="%s\n%s"%(string,fielddisplay(self,"segmentmarkers","number associated to each segment")) + string="%s\n%s"%(string,fielddisplay(self,"vertexconnectivity","list of vertices connected to vertex_i")) + string="%s\n%s"%(string,fielddisplay(self,"elementconnectivity","list of vertices connected to element_i")) + string="%s\n%s"%(string,fielddisplay(self,"average_vertex_connectivity","average number of vertices connected to one vertex")) + + string="%s%s"%(string,"\n\n Extracted model:") + string="%s\n%s"%(string,fielddisplay(self,"extractedvertices","vertices extracted from the model")) + string="%s\n%s"%(string,fielddisplay(self,"extractedelements","elements extracted from the model")) + + string="%s%s"%(string,"\n\n Projection:") + string="%s\n%s"%(string,fielddisplay(self,"lat","vertices latitude [degrees]")) + string="%s\n%s"%(string,fielddisplay(self,"long","vertices longitude [degrees]")) + string="%s\n%s"%(string,fielddisplay(self,"epsg","EPSG code (ex: 3413 for UPS Greenland, 3031 for UPS Antarctica)")) + return string + #}}} + def setdefaultparameters(self): # {{{ + + #the connectivity is the averaged number of nodes linked to a + #given node through an edge. This connectivity is used to initially + #allocate memory to the stiffness matrix. A value of 16 seems to + #give a good memory/time ration. This value can be checked in + #trunk/test/Miscellaneous/runme.m + self.average_vertex_connectivity=25 + + return self + #}}} + def checkconsistency(self,md,solution,analyses): # {{{ + + md = checkfield(md,'fieldname','mesh.x','NaN',1,'size',[md.mesh.numberofvertices]) + md = checkfield(md,'fieldname','mesh.y','NaN',1,'size',[md.mesh.numberofvertices]) + md = checkfield(md,'fieldname','mesh.elements','NaN',1,'>',0,'values',numpy.arange(1,md.mesh.numberofvertices+1)) + md = checkfield(md,'fieldname','mesh.elements','size',[md.mesh.numberofelements,3]) +# if numpy.any(numpy.logical_not(m.ismember(numpy.arange(1,md.mesh.numberofvertices+1),md.mesh.elements))): + if any(numpy.logical_not(m.ismember(numpy.arange(1,md.mesh.numberofvertices+1),md.mesh.elements))): + [x for x in A if not x in B] + md.checkmessage("orphan nodes have been found. 
Check the mesh outline") + md = checkfield(md,'fieldname','mesh.numberofelements','>',0) + md = checkfield(md,'fieldname','mesh.numberofvertices','>',0) + md = checkfield(md,'fieldname','mesh.average_vertex_connectivity','>=',9,'message',"'mesh.average_vertex_connectivity' should be at least 9 in 2d") + if solution==ThermalSolutionEnum(): + md.checkmessage("thermal not supported for 2d mesh") + + return md + # }}} + def domaintype(self): # {{{ + return "2Dhorizontal" + #}}} + def dimension(self): # {{{ + return 2 + #}}} + def elementtype(self): # {{{ + return "Tria" + #}}} + def marshall(self,md,fid): # {{{ + WriteData(fid,'enum',DomainTypeEnum(),'data',StringToEnum("Domain"+self.domaintype())[0],'format','Integer'); + WriteData(fid,'enum',DomainDimensionEnum(),'data',self.dimension(),'format','Integer'); + WriteData(fid,'enum',MeshElementtypeEnum(),'data',StringToEnum(self.elementtype())[0],'format','Integer'); + WriteData(fid,'object',self,'class','mesh','fieldname','x','format','DoubleMat','mattype',1) + WriteData(fid,'object',self,'class','mesh','fieldname','y','format','DoubleMat','mattype',1) + WriteData(fid,'enum',MeshZEnum(),'data',numpy.zeros(self.numberofvertices),'format','DoubleMat','mattype',1); + WriteData(fid,'object',self,'class','mesh','fieldname','elements','format','DoubleMat','mattype',2) + WriteData(fid,'object',self,'class','mesh','fieldname','numberofelements','format','Integer') + WriteData(fid,'object',self,'class','mesh','fieldname','numberofvertices','format','Integer') + WriteData(fid,'object',self,'class','mesh','fieldname','average_vertex_connectivity','format','Integer') + # }}} Index: ../trunk-jpl/src/py3/classes/bamgmesh.py =================================================================== --- ../trunk-jpl/src/py3/classes/bamgmesh.py (revision 0) +++ ../trunk-jpl/src/py3/classes/bamgmesh.py (revision 19895) @@ -0,0 +1,61 @@ +import numpy + +class bamgmesh(object): + """ + BAMGMESH class definition + + Usage: + bamgmesh(varargin) + """ + + def __init__(self,*args): # {{{ + self.Vertices=numpy.empty((0,3)) + self.Edges=numpy.empty((0,3)) + self.Triangles=numpy.empty((0,0)) + self.Quadrilaterals=numpy.empty((0,0)) + self.IssmEdges=numpy.empty((0,0)) + self.IssmSegments=numpy.empty((0,0)) + self.VerticesOnGeomVertex=numpy.empty((0,0)) + self.VerticesOnGeomEdge=numpy.empty((0,0)) + self.EdgesOnGeomEdge=numpy.empty((0,0)) + self.SubDomains=numpy.empty((0,4)) + self.SubDomainsFromGeom=numpy.empty((0,0)) + self.ElementConnectivity=numpy.empty((0,0)) + self.NodalConnectivity=numpy.empty((0,0)) + self.NodalElementConnectivity=numpy.empty((0,0)) + self.CrackedVertices=numpy.empty((0,0)) + self.CrackedEdges=numpy.empty((0,0)) + + if not len(args): + # if no input arguments, create a default object + pass + + elif len(args) == 1: + object=args[0] + for field in list(object.keys()): + if field in vars(self): + setattr(self,field,object[field]) + + else: + raise TypeError("bamgmesh constructor error message: unknown type of constructor call") + # }}} + def __repr__(self): # {{{ + s ="class '%s' object '%s' = \n" % (type(self),'self') + s+=" Vertices: %s\n" % str(self.Vertices) + s+=" Edges: %s\n" % str(self.Edges) + s+=" Triangles: %s\n" % str(self.Triangles) + s+=" Quadrilaterals: %s\n" % str(self.Quadrilaterals) + s+=" IssmEdges: %s\n" % str(self.IssmEdges) + s+=" IssmSegments: %s\n" % str(self.IssmSegments) + s+=" VerticesOnGeomVertex: %s\n" % str(self.VerticesOnGeomVertex) + s+=" VerticesOnGeomEdge: %s\n" % str(self.VerticesOnGeomEdge) + s+=" EdgesOnGeomEdge: %s\n" % 
str(self.EdgesOnGeomEdge) + s+=" SubDomains: %s\n" % str(self.SubDomains) + s+=" SubDomainsFromGeom: %s\n" % str(self.SubDomainsFromGeom) + s+=" ElementConnectivity: %s\n" % str(self.ElementConnectivity) + s+=" NodalConnectivity: %s\n" % str(self.NodalConnectivity) + s+=" NodalElementConnectivity: %s\n" % str(self.NodalElementConnectivity) + s+=" CrackedVertices: %s\n" % str(self.CrackedVertices) + s+=" CrackedEdges: %s\n" % str(self.CrackedEdges) + return s + # }}} Index: ../trunk-jpl/src/py3/classes/debug.py =================================================================== --- ../trunk-jpl/src/py3/classes/debug.py (revision 0) +++ ../trunk-jpl/src/py3/classes/debug.py (revision 19895) @@ -0,0 +1,34 @@ +from fielddisplay import fielddisplay +from WriteData import * + +class debug(object): + """ + DEBUG class definition + + Usage: + debug=debug(); + """ + + def __init__(self): # {{{ + self.valgrind = False + self.gprof = False + self.profiling = False + + #set defaults + self.setdefaultparameters() + + #}}} + def __repr__(self): # {{{ + string=" debug parameters:" + + string="%s\n%s"%(string,fielddisplay(self,"valgrind","use Valgrind to debug (0 or 1)")) + string="%s\n%s"%(string,fielddisplay(self,"gprof","use gnu-profiler to find out where the time is spent")) + string="%s\n%s"%(string,fielddisplay(self,'profiling','enables profiling (memory, flops, time)')) + return string + #}}} + def setdefaultparameters(self): # {{{ + return self + #}}} + def marshall(self,md,fid): # {{{ + WriteData(fid,'object',self,'fieldname','profiling','format','Boolean') + # }}} Index: ../trunk-jpl/src/py3/classes/initialization.py =================================================================== --- ../trunk-jpl/src/py3/classes/initialization.py (revision 0) +++ ../trunk-jpl/src/py3/classes/initialization.py (revision 19895) @@ -0,0 +1,132 @@ +import numpy +from project3d import project3d +from fielddisplay import fielddisplay +from EnumDefinitions import * +from checkfield import checkfield +from WriteData import WriteData +import MatlabFuncs as m + +class initialization(object): + """ + INITIALIZATION class definition + + Usage: + initialization=initialization(); + """ + + def __init__(self): # {{{ + + self.vx = float('NaN') + self.vy = float('NaN') + self.vz = float('NaN') + self.vel = float('NaN') + self.pressure = float('NaN') + self.temperature = float('NaN') + self.waterfraction = float('NaN') + self.watercolumn = float('NaN') + self.sediment_head = float('NaN') + self.epl_head = float('NaN') + self.epl_thickness = float('NaN') + + #set defaults + self.setdefaultparameters() + + #}}} + def __repr__(self): # {{{ + string=' initial field values:' + string="%s\n%s"%(string,fielddisplay(self,'vx','x component of velocity [m/yr]')) + string="%s\n%s"%(string,fielddisplay(self,'vy','y component of velocity [m/yr]')) + string="%s\n%s"%(string,fielddisplay(self,'vz','z component of velocity [m/yr]')) + string="%s\n%s"%(string,fielddisplay(self,'vel','velocity norm [m/yr]')) + string="%s\n%s"%(string,fielddisplay(self,'pressure','pressure [Pa]')) + string="%s\n%s"%(string,fielddisplay(self,'temperature','temperature [K]')) + string="%s\n%s"%(string,fielddisplay(self,'waterfraction','fraction of water in the ice')) + string="%s\n%s"%(string,fielddisplay(self,'watercolumn','thickness of subglacial water [m]')) + string="%s\n%s"%(string,fielddisplay(self,'sediment_head','sediment water head of subglacial system [m]')) + string="%s\n%s"%(string,fielddisplay(self,'epl_head','epl water head of subglacial system 
[m]')) + string="%s\n%s"%(string,fielddisplay(self,'epl_thickness','thickness of the epl [m]')) + + return string + #}}} + def extrude(self,md): # {{{ + self.vx=project3d(md,'vector',self.vx,'type','node') + self.vy=project3d(md,'vector',self.vy,'type','node') + self.vz=project3d(md,'vector',self.vz,'type','node') + self.vel=project3d(md,'vector',self.vel,'type','node') + self.temperature=project3d(md,'vector',self.temperature,'type','node') + self.waterfraction=project3d(md,'vector',self.waterfraction,'type','node') + self.watercolumn=project3d(md,'vector',self.watercolumn,'type','node') + self.sediment_head=project3d(md,'vector',self.sediment_head,'type','node','layer',1) + self.epl_head=project3d(md,'vector',self.epl_head,'type','node','layer',1) + self.epl_thickness=project3d(md,'vector',self.epl_thickness,'type','node','layer',1) + + #Lithostatic pressure by default + self.pressure=md.constants.g*md.materials.rho_ice*(md.geometry.surface-md.mesh.z.reshape(-1,1)) + return self + #}}} + def setdefaultparameters(self): # {{{ + return self + #}}} + def checkconsistency(self,md,solution,analyses): # {{{ + if StressbalanceAnalysisEnum() in analyses: + if not numpy.any(numpy.logical_or(numpy.isnan(md.initialization.vx),numpy.isnan(md.initialization.vy))): + md = checkfield(md,'fieldname','initialization.vx','NaN',1,'size',[md.mesh.numberofvertices]) + md = checkfield(md,'fieldname','initialization.vy','NaN',1,'size',[md.mesh.numberofvertices]) + if MasstransportAnalysisEnum() in analyses: + md = checkfield(md,'fieldname','initialization.vx','NaN',1,'size',[md.mesh.numberofvertices]) + md = checkfield(md,'fieldname','initialization.vy','NaN',1,'size',[md.mesh.numberofvertices]) + if BalancethicknessAnalysisEnum() in analyses: + md = checkfield(md,'fieldname','initialization.vx','NaN',1,'size',[md.mesh.numberofvertices]) + md = checkfield(md,'fieldname','initialization.vy','NaN',1,'size',[md.mesh.numberofvertices]) + #Triangle with zero velocity + if numpy.any(numpy.logical_and(numpy.sum(numpy.abs(md.initialization.vx[md.mesh.elements-1]),axis=1)==0,\ + numpy.sum(numpy.abs(md.initialization.vy[md.mesh.elements-1]),axis=1)==0)): + md.checkmessage("at least one triangle has all its vertices with a zero velocity") + if ThermalAnalysisEnum() in analyses: + md = checkfield(md,'fieldname','initialization.vx','NaN',1,'size',[md.mesh.numberofvertices]) + md = checkfield(md,'fieldname','initialization.vy','NaN',1,'size',[md.mesh.numberofvertices]) + md = checkfield(md,'fieldname','initialization.temperature','NaN',1,'size',[md.mesh.numberofvertices]) + if md.mesh.dimension()==3: + md = checkfield(md,'fieldname','initialization.vz','NaN',1,'size',[md.mesh.numberofvertices]) + md = checkfield(md,'fieldname','initialization.pressure','NaN',1,'size',[md.mesh.numberofvertices]) + if (EnthalpyAnalysisEnum() in analyses and md.thermal.isenthalpy): + md = checkfield(md,'fieldname','initialization.waterfraction','>=',0,'size',[md.mesh.numberofvertices]) + md = checkfield(md,'fieldname','initialization.watercolumn' ,'>=',0,'size',[md.mesh.numberofvertices]) + if HydrologyShreveAnalysisEnum() in analyses: + if hasattr(md.hydrology,'hydrologyshreve'): + md = checkfield(md,'fieldname','initialization.watercolumn','NaN',1,'size',[md.mesh.numberofvertices]) + if HydrologyDCInefficientAnalysisEnum() in analyses: + if hasattr(md.hydrology,'hydrologydc'): + md = checkfield(md,'fieldname','initialization.sediment_head','NaN',1,'size',[md.mesh.numberofvertices,1]) + if HydrologyDCEfficientAnalysisEnum() in analyses: + if 
hasattr(md.hydrology,'hydrologydc'): + if md.hydrology.isefficientlayer==1: + md = checkfield(md,'fieldname','initialization.epl_head','NaN',1,'size',[md.mesh.numberofvertices,1]) + md = checkfield(md,'fieldname','initialization.epl_thickness','NaN',1,'size',[md.mesh.numberofvertices,1]) + + return md + # }}} + def marshall(self,md,fid): # {{{ + + yts=365.0*24.0*3600.0 + + WriteData(fid,'data',self.vx,'format','DoubleMat','mattype',1,'enum',VxEnum(),'scale',1./yts) + WriteData(fid,'data',self.vy,'format','DoubleMat','mattype',1,'enum',VyEnum(),'scale',1./yts) + WriteData(fid,'data',self.vz,'format','DoubleMat','mattype',1,'enum',VzEnum(),'scale',1./yts) + WriteData(fid,'data',self.pressure,'format','DoubleMat','mattype',1,'enum',PressureEnum()) + WriteData(fid,'data',self.temperature,'format','DoubleMat','mattype',1,'enum',TemperatureEnum()) + WriteData(fid,'data',self.waterfraction,'format','DoubleMat','mattype',1,'enum',WaterfractionEnum()) + WriteData(fid,'data',self.watercolumn,'format','DoubleMat','mattype',1,'enum',WatercolumnEnum()) + WriteData(fid,'data',self.sediment_head,'format','DoubleMat','mattype',1,'enum',SedimentHeadEnum()) + WriteData(fid,'data',self.epl_head,'format','DoubleMat','mattype',1,'enum',EplHeadEnum()) + WriteData(fid,'data',self.epl_thickness,'format','DoubleMat','mattype',1,'enum',HydrologydcEplThicknessEnum()) + + + if md.thermal.isenthalpy: + tpmp = md.materials.meltingpoint - md.materials.beta*md.initialization.pressure; + pos = numpy.nonzero(md.initialization.temperature > tpmp)[0] + enthalpy = md.materials.heatcapacity*(md.initialization.temperature-md.constants.referencetemperature); + enthalpy[pos] = md.materials.heatcapacity*(tpmp[pos].reshape(-1,1) - md.constants.referencetemperature) + md.materials.latentheat*md.initialization.waterfraction[pos].reshape(-1,1) + WriteData(fid,'data',enthalpy,'format','DoubleMat','mattype',1,'enum',EnthalpyEnum()); + + # }}} Index: ../trunk-jpl/src/py3/classes/hydrologydc.py =================================================================== --- ../trunk-jpl/src/py3/classes/hydrologydc.py (revision 0) +++ ../trunk-jpl/src/py3/classes/hydrologydc.py (revision 19895) @@ -0,0 +1,219 @@ +import numpy +from project3d import project3d +from fielddisplay import fielddisplay +from EnumDefinitions import * +from checkfield import checkfield +from WriteData import WriteData + +class hydrologydc(object): + """ + Hydrologydc class definition + + Usage: + hydrologydc=hydrologydc(); + """ + + def __init__(self): # {{{ + self.water_compressibility = 0 + self.isefficientlayer = 0 + self.penalty_factor = 0 + self.penalty_lock = 0 + self.rel_tol = 0 + self.max_iter = 0 + self.sedimentlimit_flag = 0 + self.sedimentlimit = 0 + self.transfer_flag = 0 + self.leakage_factor = 0 + self.basal_moulin_input = float('NaN') + + self.spcsediment_head = float('NaN') + self.sediment_transmitivity = float('NaN') + self.sediment_compressibility = 0 + self.sediment_porosity = 0 + self.sediment_thickness = 0 + + + self.spcepl_head = float('NaN') + self.mask_eplactive_node = float('NaN') + self.epl_compressibility = 0 + self.epl_porosity = 0 + self.epl_initial_thickness = 0 + self.epl_colapse_thickness = 0 + self.epl_thick_comp = 0 + self.epl_max_thickness = 0 + self.epl_conductivity = 0 + self.eplflip_lock = 0 + + #set defaults + self.setdefaultparameters() + #}}} + def __repr__(self): # {{{ + string=' hydrology Dual Porous Continuum Equivalent parameters:' + string="%s\n%s"%(string,' - general parameters') + 
string="%s\n%s"%(string,fielddisplay(self,'water_compressibility','compressibility of water [Pa^-1]')) + string="%s\n%s"%(string,fielddisplay(self,'isefficientlayer','do we use an efficient drainage system [1: true 0: false]')) + string="%s\n%s"%(string,fielddisplay(self,'penalty_factor','exponent of the value used in the penalisation method [dimensionless]')) + string="%s\n%s"%(string,fielddisplay(self,'penalty_lock','stabilize unstable constraints that keep zigzagging after n iteration (default is 0, no stabilization)')) + string="%s\n%s"%(string,fielddisplay(self,'rel_tol','tolerance of the nonlinear iteration for the transfer between layers [dimensionless]')) + string="%s\n%s"%(string,fielddisplay(self,'max_iter','maximum number of nonlinear iteration')) + string="%s\n%s"%(string,fielddisplay(self,'basal_moulin_input','water flux at a given point [m3 s-1]')) + string="%s\n%s"%(string,fielddisplay(self,'sedimentlimit_flag','what kind of upper limit is applied for the inefficient layer')) + string="%s\n\t\t%s"%(string,'0: no limit') + string="%s\n\t\t%s"%(string,'1: user defined sedimentlimit') + string="%s\n\t\t%s"%(string,'2: hydrostatic pressure') + string="%s\n\t\t%s"%(string,'3: normal stress') + + if self.sedimentlimit_flag==1: + string="%s\n%s"%(string,fielddisplay(self,'sedimentlimit','user defined upper limit for the inefficient layer [m]')) + + string="%s\n%s"%(string,fielddisplay(self,'transfer_flag','what kind of transfer method is applied between the layers')) + string="%s\n\t\t%s"%(string,'0: no transfer') + string="%s\n\t\t%s"%(string,'1: constant leakage factor: leakage_factor') + + if self.transfer_flag is 1: + string="%s\n%s"%(string,fielddisplay(self,'leakage_factor','user defined leakage factor [m]')) + + string="%s\n%s"%(string,' - for the sediment layer') + string="%s\n%s"%(string,fielddisplay(self,'spcsediment_head','sediment water head constraints (NaN means no constraint) [m above MSL]')) + string="%s\n%s"%(string,fielddisplay(self,'sediment_compressibility','sediment compressibility [Pa^-1]')) + string="%s\n%s"%(string,fielddisplay(self,'sediment_porosity','sediment [dimensionless]')) + string="%s\n%s"%(string,fielddisplay(self,'sediment_thickness','sediment thickness [m]')) + string="%s\n%s"%(string,fielddisplay(self,'sediment_transmitivity','sediment transmitivity [m^2/s]')) + + if self.isefficientlayer==1: + string="%s\n%s"%(string,' - for the epl layer') + string="%s\n%s"%(string,fielddisplay(self,'spcepl_head','epl water head constraints (NaN means no constraint) [m above MSL]')) + string="%s\n%s"%(string,fielddisplay(self,'mask_eplactive_node','active (1) or not (0) EPL')) + string="%s\n%s"%(string,fielddisplay(self,'epl_compressibility','epl compressibility [Pa^-1]')) + string="%s\n%s"%(string,fielddisplay(self,'epl_porosity','epl [dimensionless]')) + string="%s\n%s"%(string,fielddisplay(self,'epl_max_thickness','epl initial thickness [m]')) + string="%s\n%s"%(string,fielddisplay(self,'epl_initial_thickness','epl initial thickness [m]')) + string="%s\n%s"%(string,fielddisplay(self,'epl_colapse_thickness','epl colapsing thickness [m]')) + string="%s\n%s"%(string,fielddisplay(self,'epl_thick_comp','epl thickness computation flag')) + string="%s\n%s"%(string,fielddisplay(self,'epl_conductivity','epl conductivity [m^2/s]')) + string="%s\n%s"%(string,fielddisplay(self,'eplflip_lock','lock epl activity to avoid flip-floping (default is 0, no stabilization)')) + return string +#}}} + def extrude(self,md): # {{{ + 
self.spcsediment_head=project3d(md,'vector',self.spcsediment_head,'type','node','layer',1) + self.spcepl_head=project3d(md,'vector',self.spcepl_head,'type','node','layer',1) + self.mask_eplactive_node=project3d(md,'vector',self.mask_eplactive_node,'type','node','layer',1) + self.sediment_transmitivity=project3d(md,'vector',self.sediment_transmitivity,'type','node','layer',1) + self.basal_moulin_input=project3d(md,'vector',self.basal_moulin_input,'type','node','layer',1) + if self.isefficientlayer==1 : + self.spcepl_head=project3d(md,'vector',self.spcepl_head,'type','node','layer',1) + return self + #}}} + def setdefaultparameters(self): #{{{ + + #Parameters from de Fleurian 2014 + self.water_compressibility = 5.04e-10 + self.isefficientlayer = 1 + self.penalty_factor = 3 + self.penalty_lock = 0 + self.rel_tol = 1.0e-06 + self.max_iter = 100 + self.sedimentlimit_flag = 0 + self.sedimentlimit = 0 + self.transfer_flag = 0 + self.leakage_factor = 10.0 + + self.sediment_compressibility = 1.0e-08 + self.sediment_porosity = 0.4 + self.sediment_thickness = 20.0 + self.sediment_transmitivity = 8.0e-04 + + self.epl_compressibility = 1.0e-08 + self.epl_porosity = 0.4 + self.epl_initial_thickness = 1.0 + self.epl_colapse_thickness = 1.0e-3 + self.epl_thick_comp = 1 + self.epl_max_thickness = 5.0 + self.epl_conductivity = 8.0e-02 + self.eplflip_lock = 0 + + return self + # }}} + def initialize(self,md): # {{{ + if numpy.all(numpy.isnan(self.basal_moulin_input)): + self.basal_moulin_input=numpy.zeros((md.mesh.numberofvertices,1)) + print(" no hydrology.basal_moulin_input specified: values set as zero") + + return self + # }}} + def checkconsistency(self,md,solution,analyses): #{{{ + + #Early return + if HydrologyDCInefficientAnalysisEnum() not in analyses and HydrologyDCEfficientAnalysisEnum() not in analyses: + return md + + md = checkfield(md,'fieldname','hydrology.water_compressibility','numel',[1],'>',0.) 
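+ #checkfield reads option pairs, each naming one constraint on the field; a sketch of how the pairs used in this class are meant to read (illustrative, not an exhaustive list): + # 'numel',[1] -> the field must be a scalar + # '>',0. -> strictly positive + # 'values',[0,1] -> restricted to the listed values + # 'timeseries',1 -> shaped as a time series over the mesh vertices + #a failing constraint is recorded on the model consistency report rather than raising immediately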
+ md = checkfield(md,'fieldname','hydrology.isefficientlayer','numel',[1],'values',[0,1]) + md = checkfield(md,'fieldname','hydrology.penalty_factor','>',0.,'numel',[1]) + md = checkfield(md,'fieldname','hydrology.penalty_lock','>=',0.,'numel',[1]) + md = checkfield(md,'fieldname','hydrology.rel_tol','>',0.,'numel',[1]) + md = checkfield(md,'fieldname','hydrology.max_iter','>',0.,'numel',[1]) + md = checkfield(md,'fieldname','hydrology.sedimentlimit_flag','numel',[1],'values',[0,1,2,3]) + md = checkfield(md,'fieldname','hydrology.transfer_flag','numel',[1],'values',[0,1]) + + if self.sedimentlimit_flag==1: + md = checkfield(md,'fieldname','hydrology.sedimentlimit','>',0.,'numel',[1]) + + if self.transfer_flag==1: + md = checkfield(md,'fieldname','hydrology.leakage_factor','>',0.,'numel',[1]) + + md = checkfield(md,'fieldname','hydrology.basal_moulin_input','NaN',1,'timeseries',1) + md = checkfield(md,'fieldname','hydrology.spcsediment_head','timeseries',1) + md = checkfield(md,'fieldname','hydrology.sediment_compressibility','>',0.,'numel',[1]) + md = checkfield(md,'fieldname','hydrology.sediment_porosity','>',0.,'numel',[1]) + md = checkfield(md,'fieldname','hydrology.sediment_thickness','>',0.,'numel',[1]) + md = checkfield(md,'fieldname','hydrology.sediment_transmitivity','>=',0,'size',[md.mesh.numberofvertices,1]) + if self.isefficientlayer==1: + md = checkfield(md,'fieldname','hydrology.spcepl_head','timeseries',1) + md = checkfield(md,'fieldname','hydrology.mask_eplactive_node','size',[md.mesh.numberofvertices,1],'values',[0,1]) + md = checkfield(md,'fieldname','hydrology.epl_compressibility','>',0.,'numel',[1]) + md = checkfield(md,'fieldname','hydrology.epl_porosity','>',0.,'numel',[1]) + md = checkfield(md,'fieldname','hydrology.epl_max_thickness','numel',[1],'>',0.) + md = checkfield(md,'fieldname','hydrology.epl_initial_thickness','numel',[1],'>',0.) + md = checkfield(md,'fieldname','hydrology.epl_colapse_thickness','numel',[1],'>',0.) + md = checkfield(md,'fieldname','hydrology.epl_thick_comp','numel',[1],'values',[0,1]) + md = checkfield(md,'fieldname','hydrology.eplflip_lock','>=',0.,'numel',[1]) + if self.epl_colapse_thickness > self.epl_initial_thickness: + md.checkmessage('Colapsing thickness for EPL larger than initial thickness') + md = checkfield(md,'fieldname','hydrology.epl_conductivity','numel',[1],'>',0.) 
+ + return md
+ # }}} + def marshall(self,md,fid): #{{{ + WriteData(fid,'enum',HydrologyModelEnum(),'data',HydrologydcEnum(),'format','Integer') + WriteData(fid,'object',self,'fieldname','water_compressibility','format','Double') + WriteData(fid,'object',self,'fieldname','isefficientlayer','format','Boolean') + WriteData(fid,'object',self,'fieldname','penalty_factor','format','Double') + WriteData(fid,'object',self,'fieldname','penalty_lock','format','Integer') + WriteData(fid,'object',self,'fieldname','rel_tol','format','Double') + WriteData(fid,'object',self,'fieldname','max_iter','format','Integer') + WriteData(fid,'object',self,'fieldname','sedimentlimit_flag','format','Integer') + WriteData(fid,'object',self,'fieldname','transfer_flag','format','Integer') + if self.sedimentlimit_flag==1: + WriteData(fid,'object',self,'fieldname','sedimentlimit','format','Double') + + if self.transfer_flag==1: + WriteData(fid,'object',self,'fieldname','leakage_factor','format','Double') + + WriteData(fid,'object',self,'fieldname','basal_moulin_input','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1) + WriteData(fid,'object',self,'fieldname','spcsediment_head','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1) + WriteData(fid,'object',self,'fieldname','sediment_compressibility','format','Double') + WriteData(fid,'object',self,'fieldname','sediment_porosity','format','Double') + WriteData(fid,'object',self,'fieldname','sediment_thickness','format','Double') + WriteData(fid,'object',self,'fieldname','sediment_transmitivity','format','DoubleMat','mattype',1) + + if self.isefficientlayer==1: + WriteData(fid,'object',self,'fieldname','spcepl_head','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1) + WriteData(fid,'object',self,'fieldname','mask_eplactive_node','format','DoubleMat','mattype',1) + WriteData(fid,'object',self,'fieldname','epl_compressibility','format','Double') + WriteData(fid,'object',self,'fieldname','epl_porosity','format','Double') + WriteData(fid,'object',self,'fieldname','epl_max_thickness','format','Double') + WriteData(fid,'object',self,'fieldname','epl_initial_thickness','format','Double') + WriteData(fid,'object',self,'fieldname','epl_colapse_thickness','format','Double') + WriteData(fid,'object',self,'fieldname','epl_thick_comp','format','Integer') + WriteData(fid,'object',self,'fieldname','epl_conductivity','format','Double') + WriteData(fid,'object',self,'fieldname','eplflip_lock','format','Integer') + # }}} Index: ../trunk-jpl/src/py3/coordsystems/ll2xy.py =================================================================== --- ../trunk-jpl/src/py3/coordsystems/ll2xy.py (revision 0) +++ ../trunk-jpl/src/py3/coordsystems/ll2xy.py (revision 19895) @@ -0,0 +1,62 @@ +import numpy as npy + +def ll2xy(lat,lon,sgn=-1,central_meridian=0,standard_parallel=71): + ''' + LL2XY - converts lat lon to polar stereographic + + Converts from geodetic latitude and longitude to Polar + Stereographic (X,Y) coordinates for the polar regions. + Author: Michael P. 
Schodlok, December 2003 (map2ll) + + Usage: + x,y = ll2xy(lat,lon,sgn) + x,y = ll2xy(lat,lon,sgn,central_meridian,standard_parallel) + + - sgn = Sign of latitude +1 : north latitude (default is mer=45 lat=70) + -1 : south latitude (default is mer=0 lat=71) + ''' + + assert sgn==1 or sgn==-1, 'error: sgn should be either +1 or -1' + + #Get central_meridian and standard_parallel depending on hemisphere + if sgn == 1: + delta = 45 + slat = 70 + print(' ll2xy: creating coordinates in north polar stereographic (Std Latitude: 70N Meridian: 45)') + else: + delta = central_meridian + slat = standard_parallel + print(' ll2xy: creating coordinates in south polar stereographic (Std Latitude: 71S Meridian: 0)') + + # Conversion constant from degrees to radians + cde = 57.29577951 + # Radius of the earth in meters + re = 6378.273*10**3 + # Eccentricity of the Hughes ellipsoid squared + ex2 = .006693883 + # Eccentricity of the Hughes ellipsoid + ex = npy.sqrt(ex2) + + latitude = npy.abs(lat) * npy.pi/180. + longitude = (lon + delta) * npy.pi/180. + + # compute X and Y in grid coordinates. + T = npy.tan(npy.pi/4-latitude/2) / ((1-ex*npy.sin(latitude))/(1+ex*npy.sin(latitude)))**(ex/2) + + if (90 - slat) < 1.e-5: + rho = 2.*re*T/npy.sqrt((1.+ex)**(1.+ex)*(1.-ex)**(1.-ex)) + else: + sl = slat*npy.pi/180. + tc = npy.tan(npy.pi/4.-sl/2.)/((1.-ex*npy.sin(sl))/(1.+ex*npy.sin(sl)))**(ex/2.) + mc = npy.cos(sl)/npy.sqrt(1.0-ex2*(npy.sin(sl)**2)) + rho = re*mc*T/tc + + y = -rho * sgn * npy.cos(sgn*longitude) + x = rho * sgn * npy.sin(sgn*longitude) + + cnt1=npy.nonzero(latitude>= npy.pi/2.)[0] + + if len(cnt1) > 0: + x[cnt1,0] = 0.0 + y[cnt1,0] = 0.0 + return x,y Index: ../trunk-jpl/src/py3/coordsystems/xy2ll.py =================================================================== --- ../trunk-jpl/src/py3/coordsystems/xy2ll.py (revision 0) +++ ../trunk-jpl/src/py3/coordsystems/xy2ll.py (revision 19895) @@ -0,0 +1,82 @@ +import numpy as npy +from math import pi + +def xy2ll(x, y, sgn, *args): + ''' + XY2LL - converts xy to lat long + + Converts Polar Stereographic (X, Y) coordinates for the polar regions to + latitude and longitude Stereographic (X, Y) coordinates for the polar + regions. + Author: Michael P. Schodlok, December 2003 (map2xy.m) + + Usage: + [lat, lon] = xy2ll(x, y, sgn); + [lat, lon] = xy2ll(x, y, sgn, central_meridian, standard_parallel); + + - sgn = Sign of latitude +1 : north latitude (default is mer=45 lat=70) + -1 : south latitude (default is mer=0 lat=71) + ''' + + #Get central_meridian and standard_parallel depending on hemisphere + if len(args) == 2: + delta = args[0] + slat = args[1] + elif len(args) == 0: + if sgn == 1: + delta = 45. + slat = 70. + print(' xy2ll: creating coordinates in north polar stereographic (Std Latitude: 70degN Meridian: 45deg)') + elif sgn == -1: + delta = 0. + slat = 71. + print(' xy2ll: creating coordinates in south polar stereographic (Std Latitude: 71degS Meridian: 0deg)') + else: + raise ValueError('sgn should be either +1 or -1') + else: + raise Exception('bad usage: type "help(xy2ll)" for details') + + # if x,y passed as lists, convert to numpy arrays + if not isinstance(x,npy.ndarray): + x=npy.array(x) + if not isinstance(y,npy.ndarray): + y=npy.array(y) + + ## Conversion constant from degrees to radians + cde = 57.29577951 + ## Radius of the earth in meters + re = 6378.273*10**3 + ## Eccentricity of the Hughes ellipsoid squared + ex2 = .006693883 + ## Eccentricity of the Hughes ellipsoid + ex = npy.sqrt(ex2) + + sl = slat*pi/180. 
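+ #The series applied below is the standard inverse of the polar stereographic projection: chi is the conformal latitude, and the sin(2*chi), sin(4*chi), sin(6*chi) terms in even powers of the eccentricity convert it to geodetic latitude (tabulated, e.g., in Snyder's Map Projections: A Working Manual). A quick sanity check worked from the formulas below: at the pole x=y=0, so rho=0, hence T=0, chi=pi/2, every sine term vanishes, and lat reduces to 90*sgn, which is exactly what the rho<=0.1 special case enforces.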
+ rho = npy.sqrt(x**2 + y**2) + cm = npy.cos(sl) / npy.sqrt(1.0 - ex2 * (npy.sin(sl)**2)) + T = npy.tan((pi/4.0) - (sl/2.0)) / ((1.0 - ex*npy.sin(sl)) / (1.0 + ex*npy.sin(sl)))**(ex / 2.0) + + if abs(slat-90.) < 1.e-5: + T = rho*npy.sqrt((1. + ex)**(1. + ex) * (1. - ex)**(1. - ex)) / 2. / re + else: + T = rho * T / (re * cm) + + chi = (pi / 2.0) - 2.0 * npy.arctan(T) + lat = chi + ((ex2 / 2.0) + (5.0 * ex2**2.0 / 24.0) + (ex2**3.0 / 12.0)) * \ + npy.sin(2 * chi) + ((7.0 * ex2**2.0 / 48.0) + (29.0 * ex2**3 / 240.0)) * \ + npy.sin(4.0 * chi) + (7.0 * ex2**3.0 / 120.0) * npy.sin(6.0 * chi) + + lat = sgn * lat + lon = npy.arctan2(sgn * x,-sgn * y) + lon = sgn * lon + + res1 = npy.nonzero(rho <= 0.1)[0] + if len(res1) > 0: + lat[res1] = 90. * sgn + lon[res1] = 0.0 + + lon = lon * 180. / pi + lat = lat * 180. / pi + lon = lon - delta + + return lat, lon Index: ../trunk-jpl/src/py3/inversions/supportedcontrols.py =================================================================== --- ../trunk-jpl/src/py3/inversions/supportedcontrols.py (revision 0) +++ ../trunk-jpl/src/py3/inversions/supportedcontrols.py (revision 19895) @@ -0,0 +1,2 @@ +def supportedcontrols(): + return ['BalancethicknessThickeningRate','FrictionCoefficient','FrictionAs','MaterialsRheologyBbar','DamageDbar','Vx','Vy'] Index: ../trunk-jpl/src/py3/inversions/parametercontroldrag.py =================================================================== --- ../trunk-jpl/src/py3/inversions/parametercontroldrag.py (revision 0) +++ ../trunk-jpl/src/py3/inversions/parametercontroldrag.py (revision 19895) @@ -0,0 +1,118 @@ +import numpy as npy +from pairoptions import pairoptions + +def parametercontroldrag(md,**kwargs): + """ + PARAMETERCONTROLDRAG - parameterization for control method on drag + + It is possible to specify the number of steps, values for the + minimum and maximum values of the drag, the + kind of cm_responses to use or the optscal. 
+ + Usage: + md=parametercontroldrag(md,**kwargs) + + Example: + md=parametercontroldrag(md) + md=parametercontroldrag(md,'nsteps',20,'cm_responses',0) + md=parametercontroldrag(md,'cm_min',1,'cm_max',150,'cm_jump',0.99,'maxiter',20) + md=parametercontroldrag(md,'eps_cm',1e-4,'optscal',[1e7,1e8]) + + See also PARAMETERCONTROLB + """ + + #process options + options=pairoptions(**kwargs) + + #control type + md.inversion.control_parameters='FrictionCoefficient' + + #weights + weights=options.getfieldvalue('weights',npy.ones(md.mesh.numberofvertices)) + if npy.size(weights)!=md.mesh.numberofvertices: + md.inversion.cost_functions_coefficients=npy.ones(md.mesh.numberofvertices) + else: + md.inversion.cost_functions_coefficients=weights + + #nsteps + nsteps=options.getfieldvalue('nsteps',100) + if npy.size(nsteps)!=1 or nsteps<=0 or npy.floor(nsteps)!=nsteps: + md.inversion.nsteps=100 + else: + md.inversion.nsteps=nsteps + + #cm_min + cm_min=options.getfieldvalue('cm_min',npy.ones(md.mesh.numberofvertices)) + if npy.size(cm_min)==1: + md.inversion.min_parameters=cm_min*npy.ones(md.mesh.numberofvertices) + else: + md.inversion.min_parameters=cm_min + + #cm_max + cm_max=options.getfieldvalue('cm_max',250*npy.ones(md.mesh.numberofvertices)) + if npy.size(cm_max)==1: + md.inversion.max_parameters=cm_max*npy.ones(md.mesh.numberofvertices) + else: + md.inversion.max_parameters=cm_max + + #eps_cm + eps_cm=options.getfieldvalue('eps_cm',float('nan')) + if npy.size(eps_cm)!=1 or eps_cm<0: + md.inversion.cost_function_threshold=float('nan') + else: + md.inversion.cost_function_threshold=eps_cm + + #maxiter + maxiter=options.getfieldvalue('maxiter',10*npy.ones(md.inversion.nsteps)) + if npy.any(maxiter<0) or npy.any(npy.floor(maxiter)!=maxiter): + md.inversion.maxiter_per_step=10*npy.ones(md.inversion.nsteps) + else: + raise RuntimeError("not implemented yet, see below matlab lines") + #md.inversion.maxiter_per_step=repmat(maxiter(:),md.inversion.nsteps,1); + #md.inversion.maxiter_per_step(md.inversion.nsteps+1:end)=[]; + + #cm_jump + cm_jump=options.getfieldvalue('cm_jump',0.8*npy.ones(md.inversion.nsteps)) + if not npy.all(npy.isreal(cm_jump)): + md.inversion.step_threshold=0.8*npy.ones(md.inversion.nsteps) + else: + raise RuntimeError("not implemented yet, see below matlab lines") + #md.inversion.step_threshold=repmat(cm_jump(:),md.inversion.nsteps,1); + #md.inversion.step_threshold(md.inversion.nsteps+1:end)=[]; + + #cm_responses + if options.exist('cm_responses'): + raise RuntimeError("not implemented yet, see below matlab lines") + #cm_responses=getfieldvalue(options,'cm_responses'); + #if ~any(~ismember(cm_responses,[101 105])), + # md.inversion.cost_functions=repmat(cm_responses(:),md.inversion.nsteps,1); + # md.inversion.cost_functions(md.inversion.nsteps+1:end)=[]; + #end + else: + raise RuntimeError("not implemented yet, see below matlab lines") + #third=ceil(md.inversion.nsteps/3); + #md.inversion.cost_functions=[103*ones(third,1);101*ones(third,1);repmat([101;101;103;101],third,1)]; + #md.inversion.cost_functions(md.inversion.nsteps+1:end)=[];
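+ #For reference, a direct numpy translation of the commented matlab default above would look roughly like this (a sketch, not validated against the rest of the py3 port): + # third=int(npy.ceil(md.inversion.nsteps/3.)) + # cf=npy.concatenate((103*npy.ones(third),101*npy.ones(third),npy.tile([101,101,103,101],third))) + # md.inversion.cost_functions=cf[:md.inversion.nsteps]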
+ #optscal + if options.exist('optscal'): + raise RuntimeError("not implemented yet, see below matlab lines") + #optscal=getfieldvalue(options,'optscal'); + #if ~any(optscal<0), + # md.inversion.gradient_scaling=repmat(optscal(:),md.inversion.nsteps,1); + # md.inversion.gradient_scaling(md.inversion.nsteps+1:end)=[]; + #end + else: + raise RuntimeError("not implemented yet, see below matlab lines") + #third=ceil(md.inversion.nsteps/3); + #md.inversion.gradient_scaling=[50*ones(3,1);15*ones(third-3,1);10*ones(third,1);repmat([10;10;20;10],third,1)]; + #md.inversion.gradient_scaling(md.inversion.nsteps+1:end)=[]; + + return md Index: ../trunk-jpl/src/py3/inversions/marshallcostfunctions.py =================================================================== --- ../trunk-jpl/src/py3/inversions/marshallcostfunctions.py (revision 0) +++ ../trunk-jpl/src/py3/inversions/marshallcostfunctions.py (revision 19895) @@ -0,0 +1,33 @@ +import copy +from EnumDefinitions import * + +def marshallcostfunctions(cost_functions): + + #copy list first + data=copy.deepcopy(cost_functions) + + #convert to Enums + pos=[i for i,x in enumerate(cost_functions) if x==101] + for i in pos: data[i]=SurfaceAbsVelMisfitEnum() + pos=[i for i,x in enumerate(cost_functions) if x==102] + for i in pos: data[i]=SurfaceRelVelMisfitEnum() + pos=[i for i,x in enumerate(cost_functions) if x==103] + for i in pos: data[i]=SurfaceLogVelMisfitEnum() + pos=[i for i,x in enumerate(cost_functions) if x==104] + for i in pos: data[i]=SurfaceLogVxVyMisfitEnum() + pos=[i for i,x in enumerate(cost_functions) if x==105] + for i in pos: data[i]=SurfaceAverageVelMisfitEnum() + pos=[i for i,x in enumerate(cost_functions) if x==201] + for i in pos: data[i]=ThicknessAbsMisfitEnum() + pos=[i for i,x in enumerate(cost_functions) if x==501] + for i in pos: data[i]=DragCoefficientAbsGradientEnum() + pos=[i for i,x in enumerate(cost_functions) if x==502] + for i in pos: data[i]=RheologyBbarAbsGradientEnum() + pos=[i for i,x in enumerate(cost_functions) if x==503] + for i in pos: data[i]=ThicknessAbsGradientEnum() + pos=[i for i,x in enumerate(cost_functions) if x==504] + for i in pos: data[i]=ThicknessAlongGradientEnum() + pos=[i for i,x in enumerate(cost_functions) if x==505] + for i in pos: data[i]=ThicknessAcrossGradientEnum() + + return data Index: ../trunk-jpl/src/py3/inversions/supportedcostfunctions.py =================================================================== --- ../trunk-jpl/src/py3/inversions/supportedcostfunctions.py (revision 0) +++ ../trunk-jpl/src/py3/inversions/supportedcostfunctions.py (revision 19895) @@ -0,0 +1,2 @@ +def supportedcostfunctions(): + return [101,102,103,104,105,201,501,502,503,504,505] Index: ../trunk-jpl/src/py3/tmp =================================================================== --- ../trunk-jpl/src/py3/tmp (revision 0) +++ ../trunk-jpl/src/py3/tmp (revision 19895) @@ -0,0 +1,425 @@ + +md = checkfield(md,'fieldname','friction.coefficient','timeseries',1,'NaN',1) +md = checkfield(md,'fieldname','friction.coefficientcoulomb','timeseries',1,'NaN',1) +md = checkfield(md,'fieldname','friction.q','NaN',1,'size',[md.mesh.numberofelements]) +md = checkfield(md,'fieldname','friction.p','NaN',1,'size',[md.mesh.numberofelements]) + + md = checkfield(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'timeseries',1) + md = checkfield(md,'fieldname','basalforcings.meltrate_factor','>=',0,'numel',[1])
+ md = checkfield(md,'fieldname','basalforcings.threshold_thickness','>=',0,'numel',[1]) + md = checkfield(md,'fieldname','basalforcings.upperdepth_melt','<=',0,'numel',[1]) + md = checkfield(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'size',[md.mesh.numberofvertices]) + md = checkfield(md,'fieldname','basalforcings.meltrate_factor','>=',0,'numel',[1]) + md = checkfield(md,'fieldname','basalforcings.threshold_thickness','>=',0,'numel',[1]) + md = checkfield(md,'fieldname','basalforcings.upperdepth_melt','<=',0,'numel',[1]) + md = checkfield(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'timeseries',1) + md = checkfield(md,'fieldname','basalforcings.meltrate_factor','>=',0,'numel',[1]) + md = checkfield(md,'fieldname','basalforcings.threshold_thickness','>=',0,'numel',[1]) + md = checkfield(md,'fieldname','basalforcings.upperdepth_melt','<=',0,'numel',[1]) + md = checkfield(md,'fieldname','basalforcings.geothermalflux','NaN',1,'timeseries',1,'>=',0) + + md = checkfield(md,fieldname,options); Index: ../trunk-jpl/src/py3/contrib/netCDF/read_netCDF.py.bak =================================================================== --- ../trunk-jpl/src/py3/contrib/netCDF/read_netCDF.py.bak (revision 0) +++ ../trunk-jpl/src/py3/contrib/netCDF/read_netCDF.py.bak (revision 19895) @@ -0,0 +1,25 @@ +from netCDF4 import Dataset +import time +import collections +from os import path, remove + +def netCDFRead(filename): + + def walktree(data): + keys = data.groups.keys() + yield keys + for key in keys: + for children in walktree(data.groups[str(key)]): + yield children + + if path.exists(filename): + print ('Opening {} for reading '.format(filename)) + NCData=Dataset(filename, 'r') + class_dict={} + + for children in walktree(NCData): + for child in children: + class_dict[str(child)]=str(getattr(NCData.groups[str(child)],'classtype')+'()') + + print class_dict + Index: ../trunk-jpl/src/py3/contrib/netCDF/read_netCDF.py =================================================================== --- ../trunk-jpl/src/py3/contrib/netCDF/read_netCDF.py (revision 0) +++ ../trunk-jpl/src/py3/contrib/netCDF/read_netCDF.py (revision 19895) @@ -0,0 +1,25 @@ +from netCDF4 import Dataset +import time +import collections +from os import path, remove + +def netCDFRead(filename): + + def walktree(data): + keys = list(data.groups.keys()) + yield keys + for key in keys: + for children in walktree(data.groups[str(key)]): + yield children + + if path.exists(filename): + print('Opening {} for reading '.format(filename)) + NCData=Dataset(filename, 'r') + class_dict={} + + for children in walktree(NCData): + for child in children: + class_dict[str(child)]=str(getattr(NCData.groups[str(child)],'classtype')+'()') + + print(class_dict) + Index: ../trunk-jpl/src/py3/contrib/netCDF/ClassTry.py.bak 
=================================================================== --- ../trunk-jpl/src/py3/contrib/netCDF/ClassTry.py.bak (revision 0) +++ ../trunk-jpl/src/py3/contrib/netCDF/ClassTry.py.bak (revision 19895) @@ -0,0 +1,122 @@ +#module imports {{{ +from netCDF4 import Dataset +import time +import collections +from os import path, remove +#}}} + + +class truc(object): + #properties + def __init__(self,*filename):#{{{ + + def netCDFread(filename): + def walktree(data): + keys = data.groups.keys() + yield keys + for key in keys: + for children in walktree(data.groups[str(key)]): + yield children + + if path.exists(filename): + print ('Opening {} for reading '.format(filename)) + NCData=Dataset(filename, 'r') + class_dict={} + + for children in walktree(NCData): + for child in children: + class_dict[str(child)]=str(getattr(NCData.groups[str(child)],'classtype')) + + return class_dict + + if filename: + classtype=netCDFread(filename[0]) + else: + classtype=self.default_prop() + + module=map(__import__,dict.values(classtype)) + + for i,mod in enumerate(dict.keys(classtype)): + self.__dict__[mod] = getattr(module[i],str(classtype[str(mod)]))() + + #}}} + def default_prop(self): # {{{ + # ordered list of properties since vars(self) is random + return {'mesh':'mesh2d',\ + 'mask':'mask',\ + 'geometry':'geometry',\ + 'constants':'constants',\ + 'smb':'SMB',\ + 'basalforcings':'basalforcings',\ + 'materials':'matice',\ + 'damage':'damage',\ + 'friction':'friction',\ + 'flowequation':'flowequation',\ + 'timestepping':'timestepping',\ + 'initialization':'initialization',\ + 'rifts':'rifts',\ + 'debug':'debug',\ + 'verbose':'verbose',\ + 'settings':'settings',\ + 'toolkits':'toolkits',\ + 'cluster':'generic',\ + 'balancethickness':'balancethickness',\ + 'stressbalance':'stressbalance',\ + 'groundingline':'groundingline',\ + 'hydrology':'hydrologyshreve',\ + 'masstransport':'masstransport',\ + 'thermal':'thermal',\ + 'steadystate':'steadystate',\ + 'transient':'transient',\ + 'calving':'calving',\ + 'gia':'gia',\ + 'autodiff':'autodiff',\ + 'flaim':'flaim',\ + 'inversion':'inversion',\ + 'qmu':'qmu',\ + 'outputdefinition':'outputdefinition',\ + 'results':'results',\ + 'radaroverlay':'radaroverlay',\ + 'miscellaneous':'miscellaneous',\ + 'private':'private'} + # }}} + + def __repr__(obj): #{{{ + #print "Here %s the number: %d" % ("is", 37) + string="%19s: %-22s -- %s" % ("mesh","[%s,%s]" % ("1x1",obj.mesh.__class__.__name__),"mesh properties") + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("mask","[%s,%s]" % ("1x1",obj.mask.__class__.__name__),"defines grounded and floating elements")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("geometry","[%s,%s]" % ("1x1",obj.geometry.__class__.__name__),"surface elevation, bedrock topography, ice thickness,...")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("constants","[%s,%s]" % ("1x1",obj.constants.__class__.__name__),"physical constants")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("smb","[%s,%s]" % ("1x1",obj.smb.__class__.__name__),"surface forcings")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("basalforcings","[%s,%s]" % ("1x1",obj.basalforcings.__class__.__name__),"bed forcings")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("materials","[%s,%s]" % ("1x1",obj.materials.__class__.__name__),"material properties")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("damage","[%s,%s]" % ("1x1",obj.damage.__class__.__name__),"damage propagation laws")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("friction","[%s,%s]" % 
("1x1",obj.friction.__class__.__name__),"basal friction/drag properties")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("flowequation","[%s,%s]" % ("1x1",obj.flowequation.__class__.__name__),"flow equations")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("timestepping","[%s,%s]" % ("1x1",obj.timestepping.__class__.__name__),"time stepping for transient models")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("initialization","[%s,%s]" % ("1x1",obj.initialization.__class__.__name__),"initial guess/state")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("rifts","[%s,%s]" % ("1x1",obj.rifts.__class__.__name__),"rifts properties")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("debug","[%s,%s]" % ("1x1",obj.debug.__class__.__name__),"debugging tools (valgrind, gprof)")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("verbose","[%s,%s]" % ("1x1",obj.verbose.__class__.__name__),"verbosity level in solve")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("settings","[%s,%s]" % ("1x1",obj.settings.__class__.__name__),"settings properties")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("toolkits","[%s,%s]" % ("1x1",obj.toolkits.__class__.__name__),"PETSc options for each solution")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("cluster","[%s,%s]" % ("1x1",obj.cluster.__class__.__name__),"cluster parameters (number of cpus...)")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("balancethickness","[%s,%s]" % ("1x1",obj.balancethickness.__class__.__name__),"parameters for balancethickness solution")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("stressbalance","[%s,%s]" % ("1x1",obj.stressbalance.__class__.__name__),"parameters for stressbalance solution")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("groundingline","[%s,%s]" % ("1x1",obj.groundingline.__class__.__name__),"parameters for groundingline solution")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("hydrology","[%s,%s]" % ("1x1",obj.hydrology.__class__.__name__),"parameters for hydrology solution")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("masstransport","[%s,%s]" % ("1x1",obj.masstransport.__class__.__name__),"parameters for masstransport solution")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("thermal","[%s,%s]" % ("1x1",obj.thermal.__class__.__name__),"parameters for thermal solution")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("steadystate","[%s,%s]" % ("1x1",obj.steadystate.__class__.__name__),"parameters for steadystate solution")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("transient","[%s,%s]" % ("1x1",obj.transient.__class__.__name__),"parameters for transient solution")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("calving","[%s,%s]" % ("1x1",obj.calving.__class__.__name__),"parameters for calving")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("autodiff","[%s,%s]" % ("1x1",obj.autodiff.__class__.__name__),"automatic differentiation parameters")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("flaim","[%s,%s]" % ("1x1",obj.flaim.__class__.__name__),"flaim parameters")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("inversion","[%s,%s]" % ("1x1",obj.inversion.__class__.__name__),"parameters for inverse methods")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("qmu","[%s,%s]" % ("1x1",obj.qmu.__class__.__name__),"dakota properties")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("outputdefinition","[%s,%s]" % ("1x1",obj.outputdefinition.__class__.__name__),"output definition")) + string="%s\n%s" % 
(string,"%19s: %-22s -- %s" % ("results","[%s,%s]" % ("1x1",obj.results.__class__.__name__),"model results")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("radaroverlay","[%s,%s]" % ("1x1",obj.radaroverlay.__class__.__name__),"radar image for plot overlay")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("miscellaneous","[%s,%s]" % ("1x1",obj.miscellaneous.__class__.__name__),"miscellaneous fields")) + return string + # }}} Index: ../trunk-jpl/src/py3/contrib/netCDF/export_netCDF.py.bak =================================================================== --- ../trunk-jpl/src/py3/contrib/netCDF/export_netCDF.py.bak (revision 0) +++ ../trunk-jpl/src/py3/contrib/netCDF/export_netCDF.py.bak (revision 19895) @@ -0,0 +1,218 @@ +from netCDF4 import Dataset, stringtochar +import numpy +import time +import collections +from mesh2d import * +from mesh3dprisms import * +from results import * +from os import path, remove + +def export_netCDF(md,filename): + #Now going on Real treatment + if path.exists(filename): + print ('File {} allready exist'.format(filename)) + newname=raw_input('Give a new name or "delete" to replace: ') + if newname=='delete': + remove(filename) + else: + print ('New file name is {}'.format(newname)) + filename=newname + + NCData=Dataset(filename, 'w', format='NETCDF4') + NCData.description = 'Results for run' + md.miscellaneous.name + NCData.history = 'Created ' + time.ctime(time.time()) + + #gather geometry and timestepping as dimensions + Duration=md.timestepping.final_time-md.timestepping.start_time + if Duration>0 and md.timestepping.time_step*md.settings.output_frequency>0: + StepNum=Duration/(md.timestepping.time_step*md.settings.output_frequency) + else: + StepNum=1 + + Dimension1=NCData.createDimension('Dimension1',md.mesh.numberofelements) + Dimension2=NCData.createDimension('Dimension2',md.mesh.numberofvertices) + Dimension3=NCData.createDimension('Dimension3',numpy.shape(md.mesh.elements)[1]) + Dimension4=NCData.createDimension('Dimension4',StepNum) + Dimension5=NCData.createDimension('Dimension5',40) + Dimension6=NCData.createDimension('Dimension6',2) + + DimDict = {len(Dimension1):'Dimension1', + len(Dimension2):'Dimension2', + len(Dimension3):'Dimension3', + len(Dimension4):'Dimension4', + len(Dimension5):'Dimension5', + len(Dimension6):'Dimension6'} + + #get all model classes and create respective groups + groups=dict.keys(md.__dict__) + for group in groups: + NCgroup=NCData.createGroup(str(group)) + #In each group gather the fields of the class + fields=dict.keys(md.__dict__[group].__dict__) + + #Special treatment for the results + if str(group)=='results': + for supfield in fields:#looping on the different solutions + NCgroup.__setattr__('classtype', "results") + Subgroup=NCgroup.createGroup(str(supfield)) + Subgroup.__setattr__('classtype',str(supfield)) + if type(md.results.__dict__[supfield])==list:#the solution have several timestep + #get last timesteps and output frequency + last_step = numpy.size(md.results.__dict__[supfield]) + step_freq = md.settings.output_frequency + #grab first time step + subfields=dict.keys(md.results.__dict__[supfield].__getitem__(0).__dict__) + for field in subfields: + if str(field)!='errlog' and str(field)!='outlog' and str(field)!='SolutionType': + Var=md.results.__dict__[supfield].__getitem__(0).__dict__[field] + DimDict=CreateVar(NCData,Var,field,Subgroup,DimDict,True,last_step,step_freq,md,supfield) + + elif type(md.results.__dict__[supfield])==results:#only one timestep + 
subfields=dict.keys(md.results.__dict__[supfield].__dict__) + for field in subfields: + if str(field)!='errlog' and str(field)!='outlog' and str(field)!='SolutionType': + print 'Treating '+str(group)+'.'+str(supfield)+'.'+str(field) + Var=md.results.__dict__[supfield].__dict__[field] + DimDict=CreateVar(NCData,Var,field,NCgroup,DimDict,False) + else: + print 'Result format not suported' + else: + + for field in fields: + print 'Treating ' +str(group)+'.'+str(field) + NCgroup.__setattr__('classtype', md.__dict__[group].__class__.__name__) + Var=md.__dict__[group].__dict__[field] + DimDict=CreateVar(NCData,Var,field,NCgroup,DimDict,False) + NCData.close() + +#============================================================================ +#Define the variables +def CreateVar(NCData,var,field,Group,DimDict,istime,*step_args): + #grab type + try: + val_type=str(var.dtype) + except AttributeError: + val_type=type(var) + #grab dimension + try: + val_shape=dict.keys(var) + except TypeError: + val_shape=numpy.shape(var) + + + TypeDict = {float:'f8', + 'float64':'f8', + int:'i8', + 'int64':'i8'} + + val_dim=numpy.shape(val_shape)[0] + #Now define and fill up variable + #treating scalar string or bool as atribute + if val_type==str or val_type==bool: + Group.__setattr__(str(field), str(var)) + + #treating list as string table + #matlab does not recognise strings so we have to settle down with char arrays + elif val_type==list: + dimensions,DimDict=GetDim(NCData,var,val_shape,DimDict,val_dim,istime) + ncvar = Group.createVariable(str(field),'S1',dimensions,zlib=True) + charvar=stringtochar(numpy.array(var)) + print charvar + print charvar.shape + for elt in range(0,val_dim): + try: + ncvar[elt] = charvar[elt] + except IndexError: + ncvar[0]= " " + #treating bool tables as string tables + elif val_type=='bool': + dimensions,DimDict=GetDim(NCData,var,val_shape,DimDict,val_dim,istime) + ncvar = Group.createVariable(str(field),'S1',dimensions,zlib=True) + for elt in range(0,val_shape[0]): + ncvar[elt] = str(var[elt]) + #treating dictionaries as string tables of dim 2 + elif val_type==collections.OrderedDict: + dimensions,DimDict=GetDim(NCData,var,val_shape,DimDict,val_dim,istime) + ncvar = Group.createVariable(str(field),'S1',dimensions,zlib=True) + for elt in range(0,val_dim): + ncvar[elt,0]=dict.keys(var)[elt] + ncvar[elt,1]=str(dict.values(var)[elt]) #converting to str to avoid potential problems + #Now dealing with numeric variables + else: + dimensions,DimDict=GetDim(NCData,var,val_shape,DimDict,val_dim,istime) + ncvar = Group.createVariable(str(field),TypeDict[val_type],dimensions,zlib=True) + + if istime: + last=step_args[0] + freq=step_args[1] + md=step_args[2] + supfield=step_args[3] + vartab=var + for time in range(freq-1,last,freq): + if time!=0: + timevar=md.results.__dict__[supfield].__getitem__(time).__dict__[field] + print 'Treating results.'+str(supfield)+'.'+str(field)+' for time '+str(time) + vartab=numpy.column_stack((vartab,timevar)) + print numpy.shape(vartab) + try: + ncvar[:,:]=vartab[:,:] + except ValueError: + ncvar[:]=vartab.T[:] + else: + try: + nan_val=numpy.isnan(var) + if nan_val.all(): + ncvar [:] = 'NaN' + else: + ncvar[:] = var + except TypeError: #type does not accept nan, get vallue of the variable + ncvar[:] = var + return DimDict + +#============================================================================ +#retriev the dimension tuple from a dictionnary +def GetDim(NCData,var,shape,DimDict,i,istime): + output=[] + #grab type + try: + val_type=str(var.dtype) + except 
AttributeError: + val_type=type(var) + #grab dimension + for dim in range(0,i): #loop on the dimensions + if type(shape[0])==int: + try: + output=output+[str(DimDict[shape[dim]])] #test if the dimension allready exist + except KeyError: #if not create it + if (shape[dim])>1: + index=len(DimDict)+1 + NewDim=NCData.createDimension('Dimension'+str(index),(shape[dim])) + DimDict[len(NewDim)]='Dimension'+str(index) + output=output+[str(DimDict[shape[dim]])] + print 'Defining dimension ' +'Dimension'+str(index) + elif type(shape[0])==str:#dealling with a dictionnary + try: + output=[str(DimDict[numpy.shape(shape)[0]])]+['DictDim'] + except KeyError: + index=len(DimDict)+1 + NewDim=NCData.createDimension('Dimension'+str(index),numpy.shape(shape)[0]) + DimDict[len(NewDim)]='Dimension'+str(index) + output=[str(DimDict[numpy.shape(dict.keys(var))[0]])]+['Dimension6'] + print 'Defining dimension ' +'Dimension'+str(index) + break + if istime: + output=output+['Dimension4'] + #dealing with char and not string as we should so we need to had a string length + if val_type=='bool' or val_type==collections.OrderedDict or val_type==list: + charvar=stringtochar(numpy.array(var)) + stringlength=charvar.shape[charvar.ndim-1] + try: + output=output+[str(DimDict[stringlength])] #test if the dimension allready exist + except KeyError: #if not create it + if (shape[dim])>1: + index=len(DimDict)+1 + NewDim=NCData.createDimension('Dimension'+str(index),(stringlength)) + DimDict[len(NewDim)]='Dimension'+str(index) + output=output+[str(DimDict[stringlength])] + print 'Defining dimension ' +'Dimension'+str(index) + return tuple(output), DimDict Property changes on: ../trunk-jpl/src/py3/contrib/netCDF/export_netCDF.py.bak ___________________________________________________________________ Added: svn:executable + * Index: ../trunk-jpl/src/py3/contrib/netCDF/ClassTry.py =================================================================== --- ../trunk-jpl/src/py3/contrib/netCDF/ClassTry.py (revision 0) +++ ../trunk-jpl/src/py3/contrib/netCDF/ClassTry.py (revision 19895) @@ -0,0 +1,122 @@ +#module imports {{{ +from netCDF4 import Dataset +import time +import collections +from os import path, remove +#}}} + + +class truc(object): + #properties + def __init__(self,*filename):#{{{ + + def netCDFread(filename): + def walktree(data): + keys = list(data.groups.keys()) + yield keys + for key in keys: + for children in walktree(data.groups[str(key)]): + yield children + + if path.exists(filename): + print(('Opening {} for reading '.format(filename))) + NCData=Dataset(filename, 'r') + class_dict={} + + for children in walktree(NCData): + for child in children: + class_dict[str(child)]=str(getattr(NCData.groups[str(child)],'classtype')) + + return class_dict + + if filename: + classtype=netCDFread(filename[0]) + else: + classtype=self.default_prop() + + module=list(map(__import__,dict.values(classtype))) + + for i,mod in enumerate(dict.keys(classtype)): + self.__dict__[mod] = getattr(module[i],str(classtype[str(mod)]))() + + #}}} + def default_prop(self): # {{{ + # ordered list of properties since vars(self) is random + return {'mesh':'mesh2d',\ + 'mask':'mask',\ + 'geometry':'geometry',\ + 'constants':'constants',\ + 'smb':'SMB',\ + 'basalforcings':'basalforcings',\ + 'materials':'matice',\ + 'damage':'damage',\ + 'friction':'friction',\ + 'flowequation':'flowequation',\ + 'timestepping':'timestepping',\ + 'initialization':'initialization',\ + 'rifts':'rifts',\ + 'debug':'debug',\ + 'verbose':'verbose',\ + 'settings':'settings',\ + 
'toolkits':'toolkits',\ + 'cluster':'generic',\ + 'balancethickness':'balancethickness',\ + 'stressbalance':'stressbalance',\ + 'groundingline':'groundingline',\ + 'hydrology':'hydrologyshreve',\ + 'masstransport':'masstransport',\ + 'thermal':'thermal',\ + 'steadystate':'steadystate',\ + 'transient':'transient',\ + 'calving':'calving',\ + 'gia':'gia',\ + 'autodiff':'autodiff',\ + 'flaim':'flaim',\ + 'inversion':'inversion',\ + 'qmu':'qmu',\ + 'outputdefinition':'outputdefinition',\ + 'results':'results',\ + 'radaroverlay':'radaroverlay',\ + 'miscellaneous':'miscellaneous',\ + 'private':'private'} + # }}} + + def __repr__(obj): #{{{ + #print "Here %s the number: %d" % ("is", 37) + string="%19s: %-22s -- %s" % ("mesh","[%s,%s]" % ("1x1",obj.mesh.__class__.__name__),"mesh properties") + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("mask","[%s,%s]" % ("1x1",obj.mask.__class__.__name__),"defines grounded and floating elements")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("geometry","[%s,%s]" % ("1x1",obj.geometry.__class__.__name__),"surface elevation, bedrock topography, ice thickness,...")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("constants","[%s,%s]" % ("1x1",obj.constants.__class__.__name__),"physical constants")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("smb","[%s,%s]" % ("1x1",obj.smb.__class__.__name__),"surface forcings")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("basalforcings","[%s,%s]" % ("1x1",obj.basalforcings.__class__.__name__),"bed forcings")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("materials","[%s,%s]" % ("1x1",obj.materials.__class__.__name__),"material properties")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("damage","[%s,%s]" % ("1x1",obj.damage.__class__.__name__),"damage propagation laws")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("friction","[%s,%s]" % ("1x1",obj.friction.__class__.__name__),"basal friction/drag properties")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("flowequation","[%s,%s]" % ("1x1",obj.flowequation.__class__.__name__),"flow equations")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("timestepping","[%s,%s]" % ("1x1",obj.timestepping.__class__.__name__),"time stepping for transient models")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("initialization","[%s,%s]" % ("1x1",obj.initialization.__class__.__name__),"initial guess/state")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("rifts","[%s,%s]" % ("1x1",obj.rifts.__class__.__name__),"rifts properties")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("debug","[%s,%s]" % ("1x1",obj.debug.__class__.__name__),"debugging tools (valgrind, gprof)")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("verbose","[%s,%s]" % ("1x1",obj.verbose.__class__.__name__),"verbosity level in solve")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("settings","[%s,%s]" % ("1x1",obj.settings.__class__.__name__),"settings properties")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("toolkits","[%s,%s]" % ("1x1",obj.toolkits.__class__.__name__),"PETSc options for each solution")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("cluster","[%s,%s]" % ("1x1",obj.cluster.__class__.__name__),"cluster parameters (number of cpus...)")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("balancethickness","[%s,%s]" % ("1x1",obj.balancethickness.__class__.__name__),"parameters for balancethickness solution")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("stressbalance","[%s,%s]" % 
("1x1",obj.stressbalance.__class__.__name__),"parameters for stressbalance solution")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("groundingline","[%s,%s]" % ("1x1",obj.groundingline.__class__.__name__),"parameters for groundingline solution")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("hydrology","[%s,%s]" % ("1x1",obj.hydrology.__class__.__name__),"parameters for hydrology solution")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("masstransport","[%s,%s]" % ("1x1",obj.masstransport.__class__.__name__),"parameters for masstransport solution")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("thermal","[%s,%s]" % ("1x1",obj.thermal.__class__.__name__),"parameters for thermal solution")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("steadystate","[%s,%s]" % ("1x1",obj.steadystate.__class__.__name__),"parameters for steadystate solution")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("transient","[%s,%s]" % ("1x1",obj.transient.__class__.__name__),"parameters for transient solution")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("calving","[%s,%s]" % ("1x1",obj.calving.__class__.__name__),"parameters for calving")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("autodiff","[%s,%s]" % ("1x1",obj.autodiff.__class__.__name__),"automatic differentiation parameters")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("flaim","[%s,%s]" % ("1x1",obj.flaim.__class__.__name__),"flaim parameters")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("inversion","[%s,%s]" % ("1x1",obj.inversion.__class__.__name__),"parameters for inverse methods")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("qmu","[%s,%s]" % ("1x1",obj.qmu.__class__.__name__),"dakota properties")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("outputdefinition","[%s,%s]" % ("1x1",obj.outputdefinition.__class__.__name__),"output definition")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("results","[%s,%s]" % ("1x1",obj.results.__class__.__name__),"model results")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("radaroverlay","[%s,%s]" % ("1x1",obj.radaroverlay.__class__.__name__),"radar image for plot overlay")) + string="%s\n%s" % (string,"%19s: %-22s -- %s" % ("miscellaneous","[%s,%s]" % ("1x1",obj.miscellaneous.__class__.__name__),"miscellaneous fields")) + return string + # }}} Index: ../trunk-jpl/src/py3/contrib/netCDF/export_netCDF.py =================================================================== --- ../trunk-jpl/src/py3/contrib/netCDF/export_netCDF.py (revision 0) +++ ../trunk-jpl/src/py3/contrib/netCDF/export_netCDF.py (revision 19895) @@ -0,0 +1,218 @@ +from netCDF4 import Dataset, stringtochar +import numpy +import time +import collections +from mesh2d import * +from mesh3dprisms import * +from results import * +from os import path, remove + +def export_netCDF(md,filename): + #Now going on Real treatment + if path.exists(filename): + print(('File {} allready exist'.format(filename))) + newname=input('Give a new name or "delete" to replace: ') + if newname=='delete': + remove(filename) + else: + print(('New file name is {}'.format(newname))) + filename=newname + + NCData=Dataset(filename, 'w', format='NETCDF4') + NCData.description = 'Results for run' + md.miscellaneous.name + NCData.history = 'Created ' + time.ctime(time.time()) + + #gather geometry and timestepping as dimensions + Duration=md.timestepping.final_time-md.timestepping.start_time + if Duration>0 and md.timestepping.time_step*md.settings.output_frequency>0: + 
StepNum=Duration/(md.timestepping.time_step*md.settings.output_frequency) + else: + StepNum=1 + + Dimension1=NCData.createDimension('Dimension1',md.mesh.numberofelements) + Dimension2=NCData.createDimension('Dimension2',md.mesh.numberofvertices) + Dimension3=NCData.createDimension('Dimension3',numpy.shape(md.mesh.elements)[1]) + Dimension4=NCData.createDimension('Dimension4',StepNum) + Dimension5=NCData.createDimension('Dimension5',40) + Dimension6=NCData.createDimension('Dimension6',2) + + DimDict = {len(Dimension1):'Dimension1', + len(Dimension2):'Dimension2', + len(Dimension3):'Dimension3', + len(Dimension4):'Dimension4', + len(Dimension5):'Dimension5', + len(Dimension6):'Dimension6'} + + #get all model classes and create respective groups + groups=dict.keys(md.__dict__) + for group in groups: + NCgroup=NCData.createGroup(str(group)) + #In each group gather the fields of the class + fields=dict.keys(md.__dict__[group].__dict__) + + #Special treatment for the results + if str(group)=='results': + for supfield in fields:#looping on the different solutions + NCgroup.__setattr__('classtype', "results") + Subgroup=NCgroup.createGroup(str(supfield)) + Subgroup.__setattr__('classtype',str(supfield)) + if type(md.results.__dict__[supfield])==list:#the solution have several timestep + #get last timesteps and output frequency + last_step = numpy.size(md.results.__dict__[supfield]) + step_freq = md.settings.output_frequency + #grab first time step + subfields=dict.keys(md.results.__dict__[supfield].__getitem__(0).__dict__) + for field in subfields: + if str(field)!='errlog' and str(field)!='outlog' and str(field)!='SolutionType': + Var=md.results.__dict__[supfield].__getitem__(0).__dict__[field] + DimDict=CreateVar(NCData,Var,field,Subgroup,DimDict,True,last_step,step_freq,md,supfield) + + elif type(md.results.__dict__[supfield])==results:#only one timestep + subfields=dict.keys(md.results.__dict__[supfield].__dict__) + for field in subfields: + if str(field)!='errlog' and str(field)!='outlog' and str(field)!='SolutionType': + print('Treating '+str(group)+'.'+str(supfield)+'.'+str(field)) + Var=md.results.__dict__[supfield].__dict__[field] + DimDict=CreateVar(NCData,Var,field,NCgroup,DimDict,False) + else: + print('Result format not suported') + else: + + for field in fields: + print('Treating ' +str(group)+'.'+str(field)) + NCgroup.__setattr__('classtype', md.__dict__[group].__class__.__name__) + Var=md.__dict__[group].__dict__[field] + DimDict=CreateVar(NCData,Var,field,NCgroup,DimDict,False) + NCData.close() + +#============================================================================ +#Define the variables +def CreateVar(NCData,var,field,Group,DimDict,istime,*step_args): + #grab type + try: + val_type=str(var.dtype) + except AttributeError: + val_type=type(var) + #grab dimension + try: + val_shape=dict.keys(var) + except TypeError: + val_shape=numpy.shape(var) + + + TypeDict = {float:'f8', + 'float64':'f8', + int:'i8', + 'int64':'i8'} + + val_dim=numpy.shape(val_shape)[0] + #Now define and fill up variable + #treating scalar string or bool as atribute + if val_type==str or val_type==bool: + Group.__setattr__(str(field), str(var)) + + #treating list as string table + #matlab does not recognise strings so we have to settle down with char arrays + elif val_type==list: + dimensions,DimDict=GetDim(NCData,var,val_shape,DimDict,val_dim,istime) + ncvar = Group.createVariable(str(field),'S1',dimensions,zlib=True) + charvar=stringtochar(numpy.array(var)) + print(charvar) + print(charvar.shape) + for 
elt in range(0,val_dim): + try: + ncvar[elt] = charvar[elt] + except IndexError: + ncvar[0]= " " + #treating bool tables as string tables + elif val_type=='bool': + dimensions,DimDict=GetDim(NCData,var,val_shape,DimDict,val_dim,istime) + ncvar = Group.createVariable(str(field),'S1',dimensions,zlib=True) + for elt in range(0,val_shape[0]): + ncvar[elt] = str(var[elt]) + #treating dictionaries as string tables of dim 2 + elif val_type==collections.OrderedDict: + dimensions,DimDict=GetDim(NCData,var,val_shape,DimDict,val_dim,istime) + ncvar = Group.createVariable(str(field),'S1',dimensions,zlib=True) + for elt in range(0,val_dim): + ncvar[elt,0]=dict.keys(var)[elt] + ncvar[elt,1]=str(dict.values(var)[elt]) #converting to str to avoid potential problems + #Now dealing with numeric variables + else: + dimensions,DimDict=GetDim(NCData,var,val_shape,DimDict,val_dim,istime) + ncvar = Group.createVariable(str(field),TypeDict[val_type],dimensions,zlib=True) + + if istime: + last=step_args[0] + freq=step_args[1] + md=step_args[2] + supfield=step_args[3] + vartab=var + for time in range(freq-1,last,freq): + if time!=0: + timevar=md.results.__dict__[supfield].__getitem__(time).__dict__[field] + print('Treating results.'+str(supfield)+'.'+str(field)+' for time '+str(time)) + vartab=numpy.column_stack((vartab,timevar)) + print(numpy.shape(vartab)) + try: + ncvar[:,:]=vartab[:,:] + except ValueError: + ncvar[:]=vartab.T[:] + else: + try: + nan_val=numpy.isnan(var) + if nan_val.all(): + ncvar [:] = 'NaN' + else: + ncvar[:] = var + except TypeError: #type does not accept nan, get vallue of the variable + ncvar[:] = var + return DimDict + +#============================================================================ +#retriev the dimension tuple from a dictionnary +def GetDim(NCData,var,shape,DimDict,i,istime): + output=[] + #grab type + try: + val_type=str(var.dtype) + except AttributeError: + val_type=type(var) + #grab dimension + for dim in range(0,i): #loop on the dimensions + if type(shape[0])==int: + try: + output=output+[str(DimDict[shape[dim]])] #test if the dimension allready exist + except KeyError: #if not create it + if (shape[dim])>1: + index=len(DimDict)+1 + NewDim=NCData.createDimension('Dimension'+str(index),(shape[dim])) + DimDict[len(NewDim)]='Dimension'+str(index) + output=output+[str(DimDict[shape[dim]])] + print('Defining dimension ' +'Dimension'+str(index)) + elif type(shape[0])==str:#dealling with a dictionnary + try: + output=[str(DimDict[numpy.shape(shape)[0]])]+['DictDim'] + except KeyError: + index=len(DimDict)+1 + NewDim=NCData.createDimension('Dimension'+str(index),numpy.shape(shape)[0]) + DimDict[len(NewDim)]='Dimension'+str(index) + output=[str(DimDict[numpy.shape(dict.keys(var))[0]])]+['Dimension6'] + print('Defining dimension ' +'Dimension'+str(index)) + break + if istime: + output=output+['Dimension4'] + #dealing with char and not string as we should so we need to had a string length + if val_type=='bool' or val_type==collections.OrderedDict or val_type==list: + charvar=stringtochar(numpy.array(var)) + stringlength=charvar.shape[charvar.ndim-1] + try: + output=output+[str(DimDict[stringlength])] #test if the dimension allready exist + except KeyError: #if not create it + if (shape[dim])>1: + index=len(DimDict)+1 + NewDim=NCData.createDimension('Dimension'+str(index),(stringlength)) + DimDict[len(NewDim)]='Dimension'+str(index) + output=output+[str(DimDict[stringlength])] + print('Defining dimension ' +'Dimension'+str(index)) + return tuple(output), DimDict Property changes 
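The DimDict bookkeeping above keys netCDF dimensions by their length, so any later variable whose axis matches an existing size reuses that dimension instead of creating a new one. A minimal standalone sketch of that reuse pattern (hypothetical file name and sizes, not part of the patch; only netCDF4 and numpy are assumed):

    from netCDF4 import Dataset
    import numpy

    nc=Dataset('sketch.nc','w',format='NETCDF4')
    dimdict={}  #length -> dimension name, as in export_netCDF above
    def getdim(length):
        #reuse a dimension of this length if one exists, otherwise create it
        if length not in dimdict:
            name='Dimension'+str(len(dimdict)+1)
            nc.createDimension(name,length)
            dimdict[length]=name
        return dimdict[length]

    a=numpy.arange(10.)
    b=numpy.arange(10.)**2  #same length, so 'Dimension1' is reused
    for name,arr in (('a',a),('b',b)):
        ncvar=nc.createVariable(name,'f8',(getdim(arr.size),),zlib=True)
        ncvar[:]=arr
    nc.close()

One caveat of keying by length alone: two unrelated axes that happen to share a size (say, 40 vertices and the 40-character string dimension) end up mapped onto the same netCDF dimension.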
Property changes on: ../trunk-jpl/src/py3/contrib/netCDF/export_netCDF.py
___________________________________________________________________
Added: svn:executable + *
Index: ../trunk-jpl/src/py3/contrib/bamg/YamsCall.py.bak
===================================================================
--- ../trunk-jpl/src/py3/contrib/bamg/YamsCall.py.bak (revision 0)
+++ ../trunk-jpl/src/py3/contrib/bamg/YamsCall.py.bak (revision 19895)
@@ -0,0 +1,123 @@
+import numpy
+import time
+import subprocess
+import os
+from ComputeHessian import ComputeHessian
+from ComputeMetric import ComputeMetric
+
+def YamsCall(md,field,hmin,hmax,gradation,epsilon):
+ """
+ YAMSCALL - call yams
+
+ build a metric using the Hessian of the given field
+ call Yams and the output mesh is plugged onto the model
+ -hmin = minimum edge length (m)
+ -hmax = maximum edge length (m)
+ -gradation = maximum edge length gradation between 2 elements
+ -epsilon = average error on each element (m/yr)
+
+ Usage:
+ md=YamsCall(md,field,hmin,hmax,gradation,epsilon);
+
+ Example:
+ md=YamsCall(md,md.inversion.vel_obs,1500,10^8,1.3,0.9);
+ """
+
+ #2d geometric parameter (do not change)
+ scale=2./9.
+
+ #Compute Hessian
+ t1=time.time()
+ print "%s" % ' computing Hessian...'
+ hessian=ComputeHessian(md.mesh.elements,md.mesh.x,md.mesh.y,field,'node')
+ t2=time.time()
+ print "%s%d%s\n" % (' done (',t2-t1,' seconds)')
+
+ #Compute metric
+ t1=time.time()
+ print "%s" % ' computing metric...'
+ metric=ComputeMetric(hessian,scale,epsilon,hmin,hmax,numpy.empty(0,int))
+ t2=time.time()
+ print "%s%d%s\n" % (' done (',t2-t1,' seconds)')
+
+ #write files
+ t1=time.time()
+ print "%s" % ' writing initial mesh files...'
+ numpy.savetxt('carre0.met',metric)
+
+ f=open('carre0.mesh','w')
+
+ #initialiation
+ f.write("\n%s\n%i\n" % ('MeshVersionFormatted',1))
+
+ #dimension
+ f.write("\n%s\n%i\n" % ('Dimension',2))
+
+ #Vertices
+ f.write("\n%s\n%i\n\n" % ('Vertices',md.mesh.numberofvertices))
+ for i in xrange(0,md.mesh.numberofvertices):
+ f.write("%8g %8g %i\n" % (md.mesh.x[i],md.mesh.y[i],0))
+
+ #Triangles
+ f.write("\n\n%s\n%i\n\n" % ('Triangles',md.mesh.numberofelements))
+ for i in xrange(0,md.mesh.numberofelements):
+ f.write("%i %i %i %i\n" % (md.mesh.elements[i,0],md.mesh.elements[i,1],md.mesh.elements[i,2],0))
+ numberofelements1=md.mesh.numberofelements
+
+ #Deal with rifts
+ if numpy.any(not numpy.isnan(md.rifts.riftstruct)):
+
+ #we have the list of triangles that make up the rift. keep those triangles around during refinement.
+ triangles=numpy.empty(0,int)
+ for riftstruct in md.rifts.riftstruct:
+ triangles=numpy.concatenate((triangles,riftstruct.segments[:,2]))
+
+ f.write("\n\n%s\n%i\n\n" % ('RequiredTriangles',numpy.size(triangles)))
+ for triangle in triangles:
+ f.write("%i\n" % triangle)
+
+ #close
+ f.close()
+ t2=time.time()
+ print "%s%d%s\n" % (' done (',t2-t1,' seconds)')
+
+ #call yams
+ print "%s\n" % ' call Yams...'
+ if m.ispc():
+ #windows
+ subprocess.call('yams2-win -O 1 -v -0 -ecp -hgrad %g carre0 carre1' % gradation,shell=True)
+ elif ismac():
+ #Macosx
+ subprocess.call('yams2-osx -O 1 -v -0 -ecp -hgrad %g carre0 carre1' % gradation,shell=True)
+ else:
+ #Linux
+ subprocess.call('yams2-linux -O 1 -v -0 -ecp -hgrad %g carre0 carre1' % gradation,shell=True)
+
+ #plug new mesh
+ t1=time.time()
+ print "\n%s" % ' reading final mesh files...'
+ Tria=numpy.loadtxt('carre1.tria',int)
+ Coor=numpy.loadtxt('carre1.coor',float)
+ md.mesh.x=Coor[:,0]
+ md.mesh.y=Coor[:,1]
+ md.mesh.z=numpy.zeros((numpy.size(Coor,axis=0),1))
+ md.mesh.elements=Tria
+ md.mesh.numberofvertices=numpy.size(Coor,axis=0)
+ md.mesh.numberofelements=numpy.size(Tria,axis=0)
+ numberofelements2=md.mesh.numberofelements
+ t2=time.time()
+ print "%s%d%s\n\n" % (' done (',t2-t1,' seconds)')
+
+ #display number of elements
+ print "\n%s %i" % (' inital number of elements:',numberofelements1)
+ print "\n%s %i\n\n" % (' new number of elements:',numberofelements2)
+
+ #clean up:
+ os.remove('carre0.mesh')
+ os.remove('carre0.met')
+ os.remove('carre1.tria')
+ os.remove('carre1.coor')
+ os.remove('carre1.meshb')
+
+ return md
+
Index: ../trunk-jpl/src/py3/contrib/bamg/YamsCall.py
===================================================================
--- ../trunk-jpl/src/py3/contrib/bamg/YamsCall.py (revision 0)
+++ ../trunk-jpl/src/py3/contrib/bamg/YamsCall.py (revision 19895)
@@ -0,0 +1,123 @@
+import numpy
+import time
+import subprocess
+import os
+import MatlabFuncs as m #needed for the platform checks below; assumes MatlabFuncs provides ismac() alongside ispc()
+from ComputeHessian import ComputeHessian
+from ComputeMetric import ComputeMetric
+
+def YamsCall(md,field,hmin,hmax,gradation,epsilon):
+ """
+ YAMSCALL - call yams
+
+ build a metric using the Hessian of the given field
+ call Yams and the output mesh is plugged onto the model
+ -hmin = minimum edge length (m)
+ -hmax = maximum edge length (m)
+ -gradation = maximum edge length gradation between 2 elements
+ -epsilon = average error on each element (m/yr)
+
+ Usage:
+ md=YamsCall(md,field,hmin,hmax,gradation,epsilon)
+
+ Example:
+ md=YamsCall(md,md.inversion.vel_obs,1500,1e8,1.3,0.9)
+ """
+
+ #2d geometric parameter (do not change)
+ scale=2./9.
+
+ #Compute Hessian
+ t1=time.time()
+ print("%s" % ' computing Hessian...')
+ hessian=ComputeHessian(md.mesh.elements,md.mesh.x,md.mesh.y,field,'node')
+ t2=time.time()
+ print("%s%d%s\n" % (' done (',t2-t1,' seconds)'))
+
+ #Compute metric
+ t1=time.time()
+ print("%s" % ' computing metric...')
+ metric=ComputeMetric(hessian,scale,epsilon,hmin,hmax,numpy.empty(0,int))
+ t2=time.time()
+ print("%s%d%s\n" % (' done (',t2-t1,' seconds)'))
+
+ #write files
+ t1=time.time()
+ print("%s" % ' writing initial mesh files...')
+ numpy.savetxt('carre0.met',metric)
+
+ f=open('carre0.mesh','w')
+
+ #initialization
+ f.write("\n%s\n%i\n" % ('MeshVersionFormatted',1))
+
+ #dimension
+ f.write("\n%s\n%i\n" % ('Dimension',2))
+
+ #Vertices
+ f.write("\n%s\n%i\n\n" % ('Vertices',md.mesh.numberofvertices))
+ for i in range(0,md.mesh.numberofvertices):
+ f.write("%8g %8g %i\n" % (md.mesh.x[i],md.mesh.y[i],0))
+
+ #Triangles
+ f.write("\n\n%s\n%i\n\n" % ('Triangles',md.mesh.numberofelements))
+ for i in range(0,md.mesh.numberofelements):
+ f.write("%i %i %i %i\n" % (md.mesh.elements[i,0],md.mesh.elements[i,1],md.mesh.elements[i,2],0))
+ numberofelements1=md.mesh.numberofelements
+
+ #Deal with rifts
+ if numpy.any(not numpy.isnan(md.rifts.riftstruct)):
+
+ #we have the list of triangles that make up the rift. keep those triangles around during refinement.
+ triangles=numpy.empty(0,int)
+ for riftstruct in md.rifts.riftstruct:
+ triangles=numpy.concatenate((triangles,riftstruct.segments[:,2]))
+
+ f.write("\n\n%s\n%i\n\n" % ('RequiredTriangles',numpy.size(triangles)))
+ for triangle in triangles:
+ f.write("%i\n" % triangle)
+
+ #close
+ f.close()
+ t2=time.time()
+ print("%s%d%s\n" % (' done (',t2-t1,' seconds)'))
+
+ #call yams
+ print("%s\n" % ' call Yams...')
+ if m.ispc():
+ #windows
+ subprocess.call('yams2-win -O 1 -v -0 -ecp -hgrad %g carre0 carre1' % gradation,shell=True)
+ elif m.ismac():
+ #Macosx
+ subprocess.call('yams2-osx -O 1 -v -0 -ecp -hgrad %g carre0 carre1' % gradation,shell=True)
+ else:
+ #Linux
+ subprocess.call('yams2-linux -O 1 -v -0 -ecp -hgrad %g carre0 carre1' % gradation,shell=True)
+
+ #plug new mesh
+ t1=time.time()
+ print("\n%s" % ' reading final mesh files...')
+ Tria=numpy.loadtxt('carre1.tria',int)
+ Coor=numpy.loadtxt('carre1.coor',float)
+ md.mesh.x=Coor[:,0]
+ md.mesh.y=Coor[:,1]
+ md.mesh.z=numpy.zeros((numpy.size(Coor,axis=0),1))
+ md.mesh.elements=Tria
+ md.mesh.numberofvertices=numpy.size(Coor,axis=0)
+ md.mesh.numberofelements=numpy.size(Tria,axis=0)
+ numberofelements2=md.mesh.numberofelements
+ t2=time.time()
+ print("%s%d%s\n\n" % (' done (',t2-t1,' seconds)'))
+
+ #display number of elements
+ print("\n%s %i" % (' initial number of elements:',numberofelements1))
+ print("\n%s %i\n\n" % (' new number of elements:',numberofelements2))
+
+ #clean up:
+ os.remove('carre0.mesh')
+ os.remove('carre0.met')
+ os.remove('carre1.tria')
+ os.remove('carre1.coor')
+ os.remove('carre1.meshb')
+
+ return md
+
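For reference, the carre0.mesh written above follows the INRIA/GAMMA .mesh layout implied by the f.write calls; for a hypothetical four-vertex, two-triangle square the output would look like (illustrative values, not part of the patch):

    MeshVersionFormatted
    1

    Dimension
    2

    Vertices
    4

    0 0 0
    1 0 0
    1 1 0
    0 1 0

    Triangles
    2

    1 2 3 0
    1 3 4 0

The companion carre0.met, written by numpy.savetxt, holds one metric row per vertex.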
Index: ../trunk-jpl/src/py3/contrib/paraview/exportVTK.py.bak
===================================================================
--- ../trunk-jpl/src/py3/contrib/paraview/exportVTK.py.bak (revision 0)
+++ ../trunk-jpl/src/py3/contrib/paraview/exportVTK.py.bak (revision 19895)
@@ -0,0 +1,163 @@
+import numpy
+import os
+import model
+import glob
+def exportVTK(filename,model,*args):
+ '''
+ vtk export
+ function exportVTK(filename,model)
+ creates a directory with the vtk files for displays in paraview
+ (only work for triangle and wedges based on their number of nodes)
+
+ Give only the results for nw but could be extended to geometry, mask...
+ + input: filename destination + (string) + ------------------------------------------------------------------ +model this is md + ------------------------------------------------------------------ + By default only the results are exported, you can add whichever + field you need as a string: + add 'geometry' to export md.geometry + + Basile de Fleurian: + ''' + Dir=os.path.basename(filename) + Path=filename[:-len(Dir)] + + if os.path.exists(filename): + print ('File {} allready exist'.format(filename)) + newname=raw_input('Give a new name or "delete" to replace: ') + if newname=='delete': + filelist = glob.glob(filename+'/*') + for oldfile in filelist: + os.remove(oldfile) + else: + print ('New file name is {}'.format(newname)) + filename=newname + os.mkdir(filename) + else: + os.mkdir(filename) + + #get the element related variables + if 'z' in dict.keys(model.mesh.__dict__): + points=numpy.column_stack((model.mesh.x,model.mesh.y,model.mesh.z)) + dim=3 + else: + points=numpy.column_stack((model.mesh.x,model.mesh.y,numpy.zeros(numpy.shape(model.mesh.x)))) + dim=2 + + num_of_points=numpy.size(model.mesh.x) + num_of_elt=numpy.shape(model.mesh.elements)[0] + point_per_elt=numpy.shape(model.mesh.elements)[1] + + #Select the type of element function of the number of nodes per elements + if point_per_elt==3: + celltype=5 #triangles + elif point_per_elt==6: + celltype=13 #wedges + else: + error('Your Element definition is not taken into account \n') + + #this is the result structure + res_struct=model.results + if (len(res_struct.__dict__)>0): + #Getting all the solutions of the model + solnames=(dict.keys(res_struct.__dict__)) + num_of_sols=len(solnames) + num_of_timesteps=1 + out_freq=model.settings.output_frequency + #%building solutionstructure + for solution in solnames: + #looking for multiple time steps + if (numpy.size(res_struct.__dict__[solution])>num_of_timesteps): + num_of_timesteps=numpy.size(res_struct.__dict__[solution]) + num_of_timesteps=int(num_of_timesteps/out_freq)+1 + else: + num_of_timesteps=1 + + for step in range(0,num_of_timesteps): + timestep=step + fid=open((filename +'/Timestep.vtk'+str(timestep)+'.vtk'),'w+') + fid.write('# vtk DataFile Version 2.0 \n') + fid.write('Data for run %s \n' % model.miscellaneous.name) + fid.write('ASCII \n') + fid.write('DATASET UNSTRUCTURED_GRID \n') + fid.write('POINTS %d float\n' % num_of_points) + if(dim==3): + for point in points: + fid.write('%f %f %f \n'%(point[0], point[1], point[2])) + elif(dim==2): + for point in points: + fid.write('%f %f %f \n'%(point[0], point[1], point[2])) + + fid.write('CELLS %d %d\n' %(num_of_elt, num_of_elt*(point_per_elt+1))) + + if point_per_elt==3: + for elt in range(0, num_of_elt): + fid.write('3 %d %d %d\n' %(model.mesh.elements[elt,0]-1,model.mesh.elements[elt,1]-1,model.mesh.elements[elt,2]-1)) + elif point_per_elt==6: + for elt in range(0, num_of_elt): + fid.write('6 %d %d %d %d %d %d\n' %(model.mesh.elements[elt,0]-1,model.mesh.elements[elt,1]-1,model.mesh.elements[elt,2]-1,model.mesh.elements[elt,3]-1,model.mesh.elements[elt,4]-1,model.mesh.elements[elt,5]-1)) + else: + print 'Number of nodes per element not supported' + + fid.write('CELL_TYPES %d\n' %num_of_elt) + for elt in range(0, num_of_elt): + fid.write('%d\n' %celltype) + + fid.write('POINT_DATA %s \n' %str(num_of_points)) + + #loop over the different solution structures + if 'solnames' in locals(): + for sol in solnames: + #dealing with results on different timesteps + if(numpy.size(res_struct.__dict__[sol])>timestep): + timestep = 
step
+ else:
+ timestep = numpy.size(res_struct.__dict__[sol])
+
+ #getting the fields in the solution
+ if(numpy.size(res_struct.__dict__[sol])>1):
+ fieldnames=dict.keys(res_struct.__dict__[sol].__getitem__(timestep*out_freq-1).__dict__)
+ else:
+ fieldnames=dict.keys(res_struct.__dict__[sol].__dict__)
+ #check which field is a real result and print
+ for field in fieldnames:
+ if(numpy.size(res_struct.__dict__[sol])>1):
+ fieldstruct=res_struct.__dict__[sol].__getitem__(timestep*out_freq-1).__dict__[field]
+ else:
+ fieldstruct=res_struct.__dict__[sol].__dict__[field]
+
+ if ((numpy.size(fieldstruct))==num_of_points):
+ fid.write('SCALARS %s float 1 \n' % field)
+ fid.write('LOOKUP_TABLE default\n')
+ for node in range(0,num_of_points):
+ #paraview does not like NaN, replacing
+ if numpy.isnan(fieldstruct[node]):
+ fid.write('%e\n' % -9999.9999)
+ #also checking for verry small value that mess up
+ elif (abs(fieldstruct[node])<1.0e-20):
+ fid.write('%e\n' % 0.0)
+ else:
+ fid.write('%e\n' % fieldstruct[node])
+
+ #loop on arguments, if something other than result is asked, do
+ #it now
+ for other in args:
+ other_struct=model.__dict__[other]
+ othernames=(dict.keys(other_struct.__dict__))
+ for field in othernames:
+ if ((numpy.size(other_struct.__dict__[field]))==num_of_points):
+ fid.write('SCALARS %s float 1 \n' % field)
+ fid.write('LOOKUP_TABLE default\n')
+ for node in range(0,num_of_points):
+ #paraview does not like NaN, replacing
+ if numpy.isnan(other_struct.__dict__[field][node]):
+ fid.write('%e\n' % -9999.9999)
+ #also checking for verry small value that mess up
+ elif (abs(other_struct.__dict__[field][node])<1.0e-20):
+ fid.write('%e\n' % 0.0)
+ else:
+ fid.write('%e\n' % other_struct.__dict__[field][node])
+ fid.close();
Index: ../trunk-jpl/src/py3/contrib/paraview/exportVTK.py
===================================================================
--- ../trunk-jpl/src/py3/contrib/paraview/exportVTK.py (revision 0)
+++ ../trunk-jpl/src/py3/contrib/paraview/exportVTK.py (revision 19895)
@@ -0,0 +1,163 @@
+import numpy
+import os
+import model
+import glob
+def exportVTK(filename,model,*args):
+ '''
+ vtk export
+ function exportVTK(filename,model)
+ creates a directory with the vtk files for displays in paraview
+ (only works for triangles and wedges, based on their number of nodes)
+
+ Gives only the results for now but could be extended to geometry, mask...
+
+ input: filename destination
+ (string)
+ ------------------------------------------------------------------
+ model: this is md
+ ------------------------------------------------------------------
+ By default only the results are exported, you can add whichever
+ field you need as a string:
+ add 'geometry' to export md.geometry
+
+ Basile de Fleurian:
+ '''
+ Dir=os.path.basename(filename)
+ Path=filename[:-len(Dir)]
+
+ if os.path.exists(filename):
+ print('File {} already exists'.format(filename))
+ newname=input('Give a new name or "delete" to replace: ')
+ if newname=='delete':
+ filelist = glob.glob(filename+'/*')
+ for oldfile in filelist:
+ os.remove(oldfile)
+ else:
+ print('New file name is {}'.format(newname))
+ filename=newname
+ os.mkdir(filename)
+ else:
+ os.mkdir(filename)
+
+ #get the element related variables
+ if 'z' in dict.keys(model.mesh.__dict__):
+ points=numpy.column_stack((model.mesh.x,model.mesh.y,model.mesh.z))
+ dim=3
+ else:
+ points=numpy.column_stack((model.mesh.x,model.mesh.y,numpy.zeros(numpy.shape(model.mesh.x))))
+ dim=2
+
+ num_of_points=numpy.size(model.mesh.x)
+ num_of_elt=numpy.shape(model.mesh.elements)[0]
+ point_per_elt=numpy.shape(model.mesh.elements)[1]
+
+ #Select the type of element as a function of the number of nodes per element
+ if point_per_elt==3:
+ celltype=5 #triangles
+ elif point_per_elt==6:
+ celltype=13 #wedges
+ else:
+ raise Exception('Your Element definition is not taken into account\n') #error() does not exist in python
+
+ #this is the result structure
+ res_struct=model.results
+ if (len(res_struct.__dict__)>0):
+ #Getting all the solutions of the model
+ solnames=(dict.keys(res_struct.__dict__))
+ num_of_sols=len(solnames)
+ num_of_timesteps=1
+ out_freq=model.settings.output_frequency
+ #building solution structure
+ for solution in solnames:
+ #looking for multiple time steps
+ if (numpy.size(res_struct.__dict__[solution])>num_of_timesteps):
+ num_of_timesteps=numpy.size(res_struct.__dict__[solution])
+ num_of_timesteps=int(num_of_timesteps/out_freq)+1
+ else:
+ num_of_timesteps=1
+
+ for step in range(0,num_of_timesteps):
+ timestep=step
+ fid=open((filename +'/Timestep.vtk'+str(timestep)+'.vtk'),'w+')
+ fid.write('# vtk DataFile Version 2.0 \n')
+ fid.write('Data for run %s \n' % model.miscellaneous.name)
+ fid.write('ASCII \n')
+ fid.write('DATASET UNSTRUCTURED_GRID \n')
+ fid.write('POINTS %d float\n' % num_of_points)
+ if(dim==3):
+ for point in points:
+ fid.write('%f %f %f \n'%(point[0], point[1], point[2]))
+ elif(dim==2):
+ for point in points:
+ fid.write('%f %f %f \n'%(point[0], point[1], point[2]))
+
+ fid.write('CELLS %d %d\n' %(num_of_elt, num_of_elt*(point_per_elt+1)))
+
+ if point_per_elt==3:
+ for elt in range(0, num_of_elt):
+ fid.write('3 %d %d %d\n' %(model.mesh.elements[elt,0]-1,model.mesh.elements[elt,1]-1,model.mesh.elements[elt,2]-1))
+ elif point_per_elt==6:
+ for elt in range(0, num_of_elt):
+ fid.write('6 %d %d %d %d %d %d\n' %(model.mesh.elements[elt,0]-1,model.mesh.elements[elt,1]-1,model.mesh.elements[elt,2]-1,model.mesh.elements[elt,3]-1,model.mesh.elements[elt,4]-1,model.mesh.elements[elt,5]-1))
+ else:
+ print('Number of nodes per element not supported')
+
+ fid.write('CELL_TYPES %d\n' %num_of_elt)
+ for elt in range(0, num_of_elt):
+ fid.write('%d\n' %celltype)
+
+ fid.write('POINT_DATA %s \n' %str(num_of_points))
+
+ #loop over the different solution structures
+ if 'solnames' in locals():
+ for sol in solnames:
+ #dealing with results on different timesteps
+ if(numpy.size(res_struct.__dict__[sol])>timestep):
+ timestep = step
+ else:
+ timestep = numpy.size(res_struct.__dict__[sol])
+
+ #getting the fields in the solution
+ if(numpy.size(res_struct.__dict__[sol])>1):
+ fieldnames=dict.keys(res_struct.__dict__[sol].__getitem__(timestep*out_freq-1).__dict__)
+ else:
+ fieldnames=dict.keys(res_struct.__dict__[sol].__dict__)
+ #check which field is a real result and print
+ for field in fieldnames:
+ if(numpy.size(res_struct.__dict__[sol])>1):
+ fieldstruct=res_struct.__dict__[sol].__getitem__(timestep*out_freq-1).__dict__[field]
+ else:
+ fieldstruct=res_struct.__dict__[sol].__dict__[field]
+
+ if ((numpy.size(fieldstruct))==num_of_points):
+ fid.write('SCALARS %s float 1 \n' % field)
+ fid.write('LOOKUP_TABLE default\n')
+ for node in range(0,num_of_points):
+ #paraview does not like NaN, replacing
+ if numpy.isnan(fieldstruct[node]):
+ fid.write('%e\n' % -9999.9999)
+ #also checking for very small values that mess things up
+ elif (abs(fieldstruct[node])<1.0e-20):
+ fid.write('%e\n' % 0.0)
+ else:
+ fid.write('%e\n' % fieldstruct[node])
+
+ #loop on arguments, if something other than result is asked, do
+ #it now
+ for other in args:
+ other_struct=model.__dict__[other]
+ othernames=(dict.keys(other_struct.__dict__))
+ for field in othernames:
+ if ((numpy.size(other_struct.__dict__[field]))==num_of_points):
+ fid.write('SCALARS %s float 1 \n' % field)
+ fid.write('LOOKUP_TABLE default\n')
+ for node in range(0,num_of_points):
+ #paraview does not like NaN, replacing
+ if numpy.isnan(other_struct.__dict__[field][node]):
+ fid.write('%e\n' % -9999.9999)
+ #also checking for very small values that mess things up
+ elif (abs(other_struct.__dict__[field][node])<1.0e-20):
+ fid.write('%e\n' % 0.0)
+ else:
+ fid.write('%e\n' % other_struct.__dict__[field][node])
+ fid.close()
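To make the format concrete, the writes above would emit a legacy-VTK file of this shape for a hypothetical one-triangle mesh with a single nodal field (illustrative run name and values; the -1 in the connectivity loop shifts ISSM's 1-based node indices to VTK's 0-based ones):

    # vtk DataFile Version 2.0 
    Data for run squareshelf 
    ASCII 
    DATASET UNSTRUCTURED_GRID 
    POINTS 3 float
    0.000000 0.000000 0.000000 
    1.000000 0.000000 0.000000 
    0.000000 1.000000 0.000000 
    CELLS 1 4
    3 0 1 2
    CELL_TYPES 1
    5
    POINT_DATA 3 
    SCALARS Thickness float 1 
    LOOKUP_TABLE default
    1.000000e+03
    1.000000e+03
    1.000000e+03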
Index: ../trunk-jpl/src/py3/extrusion/project2d.py
===================================================================
--- ../trunk-jpl/src/py3/extrusion/project2d.py (revision 0)
+++ ../trunk-jpl/src/py3/extrusion/project2d.py (revision 19895)
@@ -0,0 +1,49 @@
+import numpy as npy
+
+def project2d(md3d,value,layer):
+ '''
+ returns the value of a field for a given layer of the mesh
+
+ returns the value of a vector for a given layer from extruded mesh onto the 2d mesh
+ used to do the extrusion. This function is used to compare values between different
+ layers of a 3d mesh.
+
+ Usage:
+ projection_value=project2d(md3d,value,layer)
+
+ Example:
+ vel2=project2d(md3d,md3d.initialization.vel,2)
+ returns the velocity of the second layer (1 is the base)
+ '''
+
+ if md3d.mesh.domaintype().lower() != '3d':
+ raise Exception("model passed to project2d function should be 3D")
+
+ if layer<1 or layer>md3d.mesh.numberoflayers:
+ raise ValueError("layer must be between 1 and %i" % md3d.mesh.numberoflayers)
+
+ # coerce to array in case float is passed
+ if type(value)!=npy.ndarray:
+ print('coercing array')
+ value=npy.array(value)
+
+ vec2d=False
+ if value.ndim==2 and value.shape[1]==1:
+ value=value.reshape(-1,)
+ vec2d=True
+
+ if value.size==1:
+ projection_value=value #scalar field: the same value applies on every layer
+ elif value.shape[0]==md3d.mesh.numberofvertices:
+ #print 'indices: ', (layer-1)*md3d.mesh.numberofvertices2d, layer*md3d.mesh.numberofvertices2d
+ projection_value=value[(layer-1)*md3d.mesh.numberofvertices2d:layer*md3d.mesh.numberofvertices2d]
+ elif value.shape[0]==md3d.mesh.numberofvertices+1:
+ projection_value=[value[(layer-1)*md3d.mesh.numberofvertices2d:layer*md3d.mesh.numberofvertices2d], value[-1]]
+ else:
+ projection_value=value[(layer-1)*md3d.mesh.numberofelements2d:layer*md3d.mesh.numberofelements2d]
+
+ if vec2d:
+ projection_value=projection_value.reshape(-1,1)
+
+ return projection_value
Index: ../trunk-jpl/src/py3/extrusion/project3d.py
===================================================================
--- ../trunk-jpl/src/py3/extrusion/project3d.py (revision 0)
+++ ../trunk-jpl/src/py3/extrusion/project3d.py (revision 19895)
@@ -0,0 +1,90 @@
+import numpy
+from pairoptions import pairoptions
+
+def project3d(md,**kwargs):
+ """
+ PROJECT3D - vertically project a vector from 2d mesh
+
+ vertically project a vector from 2d mesh (split in noncoll and coll areas) into a 3d mesh.
+ This vector can be a node vector of size (md.mesh.numberofvertices2d,N/A) or an
+ element vector of size (md.mesh.numberofelements2d,N/A).
+ arguments:
+ 'vector': 2d vector
+ 'type': 'element' or 'node'.
+ options:
+ 'layer': a layer number where the vector should keep its values. If not specified, all layers adopt the
+ value of the 2d vector.
+ 'padding': default to 0 (value adopted by the other 3d layers not being projected)
+
+ Examples:
+ extruded_vector=project3d(md,'vector',vector2d,'type','node','layer',1,'padding',NaN)
+ extruded_vector=project3d(md,'vector',vector2d,'type','element','padding',0)
+ extruded_vector=project3d(md,'vector',vector2d,'type','node')
+ """
+
+ #some regular checks
+ if not md:
+ raise TypeError("bad usage")
+ if md.mesh.domaintype().lower() != '3d':
+ raise TypeError("input model is not 3d")
+
+ #retrieve parameters from options.
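+ #pairoptions stores the **kwargs pairs; getfieldvalue(name) without a default makes 'vector'
+ #and 'type' mandatory below, while 'layer' and 'padding' quietly fall back to 0 when absent.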
+ options = pairoptions(**kwargs)
+ vector2d = options.getfieldvalue('vector') #mandatory
+ vectype = options.getfieldvalue('type') #mandatory
+ layer = options.getfieldvalue('layer',0) #optional (do all layers otherwise)
+ paddingvalue = options.getfieldvalue('padding',0) #0 by default
+
+ vector1d=False
+ if isinstance(vector2d,numpy.ndarray) and numpy.ndim(vector2d)==1:
+ vector1d=True
+ vector2d=vector2d.reshape(-1,1)
+
+ if isinstance(vector2d,(bool,int,float)) or numpy.size(vector2d)==1:
+ projected_vector=vector2d
+
+ elif vectype.lower()=='node':
+
+ #Initialize 3d vector
+ if vector2d.shape[0]==md.mesh.numberofvertices2d:
+ projected_vector=(paddingvalue*numpy.ones((md.mesh.numberofvertices, numpy.size(vector2d,axis=1)))).astype(vector2d.dtype)
+ elif vector2d.shape[0]==md.mesh.numberofvertices2d+1:
+ projected_vector=(paddingvalue*numpy.ones((md.mesh.numberofvertices+1,numpy.size(vector2d,axis=1)))).astype(vector2d.dtype)
+ projected_vector[-1,:]=vector2d[-1,:]
+ vector2d=vector2d[:-1,:]
+ else:
+ raise TypeError("vector length not supported")
+
+ #Fill in
+ if layer==0:
+ for i in range(md.mesh.numberoflayers):
+ projected_vector[(i*md.mesh.numberofvertices2d):((i+1)*md.mesh.numberofvertices2d),:]=vector2d
+ else:
+ projected_vector[((layer-1)*md.mesh.numberofvertices2d):(layer*md.mesh.numberofvertices2d),:]=vector2d
+
+ elif vectype.lower()=='element':
+
+ #Initialize 3d vector
+ if vector2d.shape[0]==md.mesh.numberofelements2d:
+ projected_vector=(paddingvalue*numpy.ones((md.mesh.numberofelements, numpy.size(vector2d,axis=1)))).astype(vector2d.dtype)
+ elif vector2d.shape[0]==md.mesh.numberofelements2d+1:
+ projected_vector=(paddingvalue*numpy.ones((md.mesh.numberofelements+1,numpy.size(vector2d,axis=1)))).astype(vector2d.dtype)
+ projected_vector[-1,:]=vector2d[-1,:]
+ vector2d=vector2d[:-1,:]
+ else:
+ raise TypeError("vector length not supported")
+
+ #Fill in
+ if layer==0:
+ for i in range(md.mesh.numberoflayers-1):
+ projected_vector[(i*md.mesh.numberofelements2d):((i+1)*md.mesh.numberofelements2d),:]=vector2d
+ else:
+ projected_vector[((layer-1)*md.mesh.numberofelements2d):(layer*md.mesh.numberofelements2d),:]=vector2d
+
+ else:
+ raise TypeError("project3d error message: unknown projection type")
+
+ if vector1d:
+ projected_vector=projected_vector.reshape(-1,)
+
+ return projected_vector
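As a concrete check of the row bookkeeping above, a tiny standalone illustration with hypothetical sizes (plain numpy, no ISSM classes involved):

    import numpy
    nv2d,nlayers=3,2                      #hypothetical 2d vertex count and layer count
    vector2d=numpy.array([[1.],[2.],[3.]])
    projected=numpy.zeros((nv2d*nlayers,1))
    for i in range(nlayers):              #the layer=0 case: every layer receives the 2d values
        projected[i*nv2d:(i+1)*nv2d,:]=vector2d
    #projected.ravel() -> [1. 2. 3. 1. 2. 3.]; with 'layer',2 and 'padding',numpy.nan
    #only rows 3-5 would be filled and rows 0-2 would stay nan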
Index: ../trunk-jpl/src/py3/extrusion/DepthAverage.py
===================================================================
--- ../trunk-jpl/src/py3/extrusion/DepthAverage.py (revision 0)
+++ ../trunk-jpl/src/py3/extrusion/DepthAverage.py (revision 19895)
@@ -0,0 +1,50 @@
+import numpy as npy
+from project2d import project2d
+
+def DepthAverage(md,vector):
+ '''
+ computes depth average of 3d vector using the trapezoidal rule, and returns
+ the value on the 2d mesh.
+
+ Usage:
+ vector_average=DepthAverage(md,vector)
+
+ Example:
+ vel_bar=DepthAverage(md,md.initialization.vel)
+ '''
+
+ #check that the model given in input is 3d
+ if md.mesh.elementtype() != 'Penta':
+ raise TypeError('DepthAverage error message: the model given in input must be 3d')
+
+ # coerce to array in case float is passed
+ if type(vector)!=npy.ndarray:
+ print('coercing array')
+ vector=npy.array(vector) #was npy.array(value): 'value' is not defined in this scope
+
+ vec2d=False
+ if vector.ndim==2:
+ vec2d=True
+ vector=vector.reshape(-1,)
+
+ #node data
+ if vector.shape[0]==md.mesh.numberofvertices:
+ vector_average=npy.zeros(md.mesh.numberofvertices2d)
+ for i in range(1,md.mesh.numberoflayers):
+ vector_average=vector_average+(project2d(md,vector,i)+project2d(md,vector,i+1))/2.*(project2d(md,md.mesh.z,i+1)-project2d(md,md.mesh.z,i))
+ vector_average=vector_average/project2d(md,md.geometry.thickness,1)
+
+ #element data
+ elif vector.shape[0]==md.mesh.numberofelements:
+ vector_average=npy.zeros(md.mesh.numberofelements2d)
+ for i in range(1,md.mesh.numberoflayers):
+ vector_average=vector_average+project2d(md,vector,i)*(project2d(md,md.mesh.z,i+1)-project2d(md,md.mesh.z,i))
+ vector_average=vector_average/project2d(md,md.geometry.thickness,1)
+
+ else:
+ raise ValueError('vector size not supported yet')
+
+ if vec2d:
+ vector_average=vector_average.reshape(-1,1)
+
+ return vector_average
Index: ../trunk-jpl/src/py3/interp/interp.py
===================================================================
--- ../trunk-jpl/src/py3/interp/interp.py (revision 0)
+++ ../trunk-jpl/src/py3/interp/interp.py (revision 19895)
@@ -0,0 +1,240 @@
+# module for interpolating/smoothing data
+import numpy as npy
+from scipy.interpolate import CloughTocher2DInterpolator, Rbf
+from scipy.spatial import cKDTree
+try:
+ import matplotlib.pyplot as plt
+except ImportError:
+ print('could not import matplotlib, no plotting functions enabled. Set plotonly=False in function call')
+
+def MeshSplineToMesh2d(x,y,data,xi,yi,tol=1e-6,fill_nans=False,**kwargs):#{{{
+ '''
+ Piecewise cubic, C1 smooth, curvature-minimizing interpolant in 2D.
+ The interpolant is guaranteed to be continuously differentiable,
+ and the gradients are chosen such that the curvature of the interpolant
+ is approximately minimized.
+
+ Uses scipy.interpolate.CloughTocher2DInterpolator
+
+ x,y: data point coordinates
+ data: data to be interpolated (same length as x,y)
+ xi,yi: coordinates to interpolate data onto
+ tol: tolerance for gradient estimation (default 1e-6)
+ fill_nans: fill nan's (holes) in data using the spline fit?
+ **kwargs: optional keyword arguments:
+ maxiter: maximum iterations in gradient estimation
+
+ Returns interpolated data at given x,y coordinates.
+
+ Usage:
+ interpdata=MeshSplineToMesh2d(x,y,data,xi,yi)
+
+ Examples:
+ interpdata=MeshSplineToMesh2d(md.mesh.x,md.mesh.y,data,xi,yi)
+ interpdata=MeshSplineToMesh2d(md.mesh.x,md.mesh.y,data,xi,yi,tol=1e-3,maxiter=100)
+ '''
+
+ # unpack kwargs (pop already removes the key, no further cleanup needed)
+ maxiter=kwargs.pop('maxiter',None)
+ if maxiter:
+ assert type(maxiter)==int, 'error, maxiter should be an integer'
+ assert len(kwargs)==0, 'error, unexpected or misspelled kwargs'
+
+ # create sub-vectors that just cover the limits of xi and yi
+ # TODO x,y not necessarily a grid, so need a better definition of dx,dy (e.g. average element size)
+ dx=500
+ dy=500
+ #dx=x[1]-x[0]
+ #dy=y[1]-y[0]
+ xlim=[min(xi)-dx,max(xi)+dx]
+ ylim=[min(yi)-dy,max(yi)+dy]
+ xflag=npy.logical_and(x>xlim[0],x<xlim[1])
+ yflag=npy.logical_and(y>ylim[0],y<ylim[1])
[diff text lost in extraction: the remainder of MeshSplineToMesh2d, further bounds checks of the same form on x, y and on element centers xcenter/ycenter, and the RadialInterp definition and docstring head down to its smooth parameter:]
+ smooth: float>0, adjusts the amount of smoothing applied. Defaults to 0,
+ such that the function always passes through nodal points.
+ z: coordinate array if interpolating in 3 dimensions
+ zi: coordinate array if interpolating in 3 dimensions
+
+ Usage:
+ interpdata=RadialInterp(x,y,data,**kwargs)
+
+ Examples:
+ interpdata=RadialInterp(md.mesh.x,md.mesh.y,data)
+ interpdata=RadialInterp(md.mesh.x,md.mesh.y,data,function='gaussian',epsilon=100,smooth=1)
+ '''
+
+ # unpack kwargs (pop already removes each key, no further cleanup needed)
+ function=kwargs.pop('function','gaussian')
+ epsilon=kwargs.pop('epsilon',None)
+ smooth=kwargs.pop('smooth',0)
+ z=kwargs.pop('z',None)
+ assert len(kwargs)==0, 'error, unexpected or misspelled kwargs'
+
+ if z:
+ if epsilon:
+ rbfi=Rbf(x,y,z,data,function=function,smooth=smooth,epsilon=epsilon)
+ else:
+ rbfi=Rbf(x,y,z,data,function=function,smooth=smooth)
+ interpdata=rbfi(xi,yi,zi)
+ else:
+ if epsilon:
+ rbfi=Rbf(x,y,data,function=function,smooth=smooth,epsilon=epsilon)
+ else:
+ rbfi=Rbf(x,y,data,function=function,smooth=smooth)
+ interpdata=rbfi(xi,yi)
+
+ return interpdata
+#}}}
Index: ../trunk-jpl/src/py3/interp/averaging.py
===================================================================
--- ../trunk-jpl/src/py3/interp/averaging.py (revision 0)
+++ ../trunk-jpl/src/py3/interp/averaging.py (revision 19895)
@@ -0,0 +1,93 @@
+import numpy as npy
+from GetAreas import GetAreas
+from scipy.sparse import csc_matrix
+import MatlabFuncs as m
+
+def averaging(md,data,iterations,layer=0):
+ '''
+ AVERAGING - smooths the input over the mesh
+
+ This routine takes a list over the elements or the nodes in input
+ and returns a list over the nodes.
+ For each iteration it computes the average over each element (average
+ of the vertices values) and then computes the average over each node
+ by taking the average of the elements around a node weighted by the
+ elements volume
+ For 3d meshes, a last argument can be added to specify the layer to be averaged on.
+
+ Usage:
+ smoothdata=averaging(md,data,iterations)
+ smoothdata=averaging(md,data,iterations,layer)
+
+ Examples:
+ velsmoothed=averaging(md,md.initialization.vel,4)
+ pressure=averaging(md,md.initialization.pressure,0)
+ temperature=averaging(md,md.initialization.temperature,1,1)
+ '''
+
+ if len(data)!=md.mesh.numberofelements and len(data)!=md.mesh.numberofvertices:
+ raise Exception('averaging error message: data not supported yet')
+ if md.mesh.dimension()==3 and layer!=0:
+ if layer<=0 or layer>md.mesh.numberoflayers:
+ raise ValueError('layer should be between 1 and md.mesh.numberoflayers')
+ else:
+ layer=0
+
+ #initialization
+ if layer==0:
+ weights=npy.zeros(md.mesh.numberofvertices,)
+ data=data.flatten(1)
+ else:
+ weights=npy.zeros(md.mesh.numberofvertices2d,)
+ data=data[(layer-1)*md.mesh.numberofvertices2d:layer*md.mesh.numberofvertices2d,:] #python slices are 0-based, no +1 as in matlab
+
+ #load some variables (it is much faster if the variables are loaded from md once and for all)
+ if layer==0:
+ index=md.mesh.elements
+ numberofnodes=md.mesh.numberofvertices
+ numberofelements=md.mesh.numberofelements
+ else:
+ index=md.mesh.elements2d
+ numberofnodes=md.mesh.numberofvertices2d
+ numberofelements=md.mesh.numberofelements2d
+
+
+ #build some variables
+ if md.mesh.dimension()==3 and layer==0:
+ rep=6
+ areas=GetAreas(index,md.mesh.x,md.mesh.y,md.mesh.z)
+ elif md.mesh.dimension()==2:
+ rep=3
+ areas=GetAreas(index,md.mesh.x,md.mesh.y)
+ else:
+ rep=3
+ areas=GetAreas(index,md.mesh.x2d,md.mesh.y2d)
+
+ index=index-1 # since python indexing starts from zero
+ line=index.flatten(1)
+ areas=npy.vstack(areas).reshape(-1,)
+ summation=1./rep*npy.ones(rep,)
+ linesize=rep*numberofelements
+
+ #update weights that hold the volume of all the elements holding node i
+ weights=csc_matrix( (npy.tile(areas,(rep,1)).reshape(-1,),(line,npy.zeros(linesize,))), shape=(numberofnodes,1))
+
+ #initialization
+ if len(data)==numberofelements:
+ average_node=csc_matrix( (npy.tile(areas*data,(rep,1)).reshape(-1,),(line,npy.zeros(linesize,))), shape=(numberofnodes,1))
+ average_node=average_node/weights
+ average_node = csc_matrix(average_node)
+ else:
+ average_node=csc_matrix(data.reshape(-1,1))
+
+ #loop over iteration
+ for i in npy.arange(1,iterations+1):
+ average_el=npy.asarray(npy.dot(average_node.todense()[index].reshape(numberofelements,rep),npy.vstack(summation))).reshape(-1,)
+ average_node=csc_matrix( (npy.tile(areas*average_el.reshape(-1),(rep,1)).reshape(-1,),(line,npy.zeros(linesize,))), shape=(numberofnodes,1))
+ average_node=average_node/weights
+ average_node=csc_matrix(average_node)
+
+ #return output as a full matrix (C code does not like sparse matrices)
+ average=npy.asarray(average_node.todense()).reshape(-1,)
+
+ return average
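The csc_matrix constructions above implement an area-weighted scatter from elements to nodes. Restated with plain loops on a hypothetical two-triangle mesh (same arithmetic, just dense):

    import numpy as npy
    index=npy.array([[1,2,3],[2,4,3]])-1   #two triangles sharing an edge, 1-based as in ISSM
    areas=npy.array([1.,2.])
    eltval=npy.array([10.,40.])
    weights=npy.zeros(4)
    nodeval=npy.zeros(4)
    for e in range(2):
        for v in index[e]:
            weights[v]+=areas[e]           #total area of the elements touching each node
            nodeval[v]+=areas[e]*eltval[e] #area-weighted accumulation
    nodeval/=weights                       #-> [10., 30., 30., 40.]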
Resolution must be a list + [horizontal_resolution, vertical_resolution] + + Usage: + [elements,x,y,z,s,data]=SectionValues(md,data,filename,resolution) + [elements,x,y,z,s,data]=SectionValues(md,data,profile_structure,resolution) + ''' + + if os.path.isfile(infile): + profile=expread(infile)[0] + nods=profile['nods'] + x=profile['x'] + y=profile['y'] + else: + raise IOError('file %s not found' % infile) + + #get the specified resolution + if len(resolution)!=2: + raise ValueError('SectionValues error message: Resolution must be a list [horizontal_resolution, vertical_resolution]') + else: + res_h=resolution[0] + + if md.mesh.domaintype().lower() == '3d': + if isinstance(resolution[1],int) or isinstance(resolution[1],float): + res_v=resolution[1] + else: + raise ValueError('SectionValues error: resolution must be a length-2 list of integers or floats') + + #initialization + X=npy.array([]) #X-coordinate + Y=npy.array([]) #Y-coordinate + S=npy.array([0.]) #curvilinear coordinate + + for i in range(nods-1): + + x_start=x[i] + x_end=x[i+1] + y_start=y[i] + y_end=y[i+1] + s_start=S[-1] + + length_segment=npy.sqrt((x_end-x_start)**2+(y_end-y_start)**2) + portion=npy.ceil(length_segment/res_h) + + x_segment=npy.zeros(portion) + y_segment=npy.zeros(portion) + s_segment=npy.zeros(portion) + + for j in range(int(portion)): + x_segment[j]=x_start+(j)*(x_end-x_start)/portion + y_segment[j]=y_start+(j)*(y_end-y_start)/portion + s_segment[j]=s_start+j*length_segment/portion + + #plug into X and Y + X=npy.append(X,x_segment) + Y=npy.append(Y,y_segment) + S=npy.append(S,s_segment) + + X=npy.append(X,x[nods-1]) + Y=npy.append(Y,y[nods-1]) + + #Number of nodes: + numberofnodes=X.shape[0] + + #Compute Z + Z=npy.zeros(numberofnodes) + + #New mesh and Data interpolation + if '2d' in md.mesh.domaintype().lower(): + + #Interpolation of data on specified points + #data_interp=InterpFromMesh2d(md.mesh.elements,md.mesh.x,md.mesh.y,data,X,Y)[0] + data_interp=InterpFromMeshToMesh2d(md.mesh.elements,md.mesh.x,md.mesh.y,data,X,Y)[0] + #data_interp=griddata(md.mesh.x,md.mesh.y,data,X,Y) + + #Compute index + index=npy.array([list(range(1,numberofnodes)),list(range(2,numberofnodes+1))]).T + + else: + + #vertically extrude mesh + + #Get base and surface for each 2d point, offset to make sure that it is inside the glacier system + offset=1.e-3 + base=InterpFromMeshToMesh2d(md.mesh.elements2d,md.mesh.x2d,md.mesh.y2d,project2d(md,md.geometry.base,1),X,Y)[0]+offset + base=base.reshape(-1,) + surface=InterpFromMeshToMesh2d(md.mesh.elements2d,md.mesh.x2d,md.mesh.y2d,project2d(md,md.geometry.surface,1),X,Y)[0]-offset + surface=surface.reshape(-1,) + + #Some useful parameters + layers=int(npy.ceil(npy.mean(md.geometry.thickness)/res_v)) + nodesperlayer=int(numberofnodes) + nodestot=int(nodesperlayer*layers) + elementsperlayer=int(nodesperlayer-1) + elementstot=int((nodesperlayer-1)*(layers-1)) + + #initialization + X3=npy.zeros(nodesperlayer*layers) + Y3=npy.zeros(nodesperlayer*layers) + Z3=npy.zeros(nodesperlayer*layers) + S3=npy.zeros(nodesperlayer*layers) + index3=npy.zeros((elementstot,4)) + + #Get new coordinates in 3d + for i in range(1,layers+1): + X3[i-1::layers]=X + Y3[i-1::layers]=Y + Z3[i-1::layers]=base+(i-1)*(surface-base)/(layers-1) + S3[i-1::layers]=S + + if i1') + + if len(x) != len(data) or len(y) != len(data): + raise Exception('nearestneighbors error: x and y should have the same length as "data"') + + filled=data + + XYGood=npy.dstack([x[goodids],y[goodids]])[0] + XYBad=npy.dstack([x[badids],y[badids]])[0] + 
+    tree=cKDTree(XYGood)
+    nearest=tree.query(XYBad,k=knn)[1]
+
+    if knn==1:
+        filled[badids]=filled[goodids][nearest]    # can add k=N to return the N nearest neighbors
+    else:
+        for i in range(len(badids)):
+            neardat=[]
+            for j in range(knn):
+                neardat.append(filled[goodids][nearest[i][j]])
+            filled[badids[i]]=npy.mean(neardat)
+
+    return filled
Index: ../trunk-jpl/src/py3/consistency/QueueRequirements.py
===================================================================
--- ../trunk-jpl/src/py3/consistency/QueueRequirements.py    (revision 0)
+++ ../trunk-jpl/src/py3/consistency/QueueRequirements.py    (revision 19895)
@@ -0,0 +1,20 @@
+def QueueRequirements(queuedict,queue,np,time):
+    #QUEUEREQUIREMENTS - check queue requirements on time and number of cpus, by name of queue.
+    #
+    #    Usage:
+    #        QueueRequirements(queuedict,queue,np,time)
+
+    #Ok, go through requirements for current queue:
+    try:
+        rtime=queuedict[queue][0]
+    except KeyError:
+        raise Exception('QueueRequirements error message: available queues are '+str(list(queuedict.keys())))
+
+    if time<=0:
+        raise Exception('QueueRequirements: time should be a positive number')
+    if time>rtime:
+        raise Exception('QueueRequirements: time should be < '+str(rtime)+' for queue: '+queue)
+
+    #check on np requirements
+    if np<=0:
+        raise Exception('QueueRequirements: np should be a positive number')
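A toy call, with a made-up queue dictionary (the queue name and limits below are illustrative; real dictionaries come from the cluster classes, and only the walltime entry is checked here):

    queuedict={'normal':[8*3600,256]}                 # per-queue [max walltime (s), ...]
    QueueRequirements(queuedict,'normal',16,3600)     # within limits: returns silently
    QueueRequirements(queuedict,'normal',16,10*3600)  # would raise: time should be < 28800 for queue: normal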
Index: ../trunk-jpl/src/py3/consistency/ismodelselfconsistent.py
===================================================================
--- ../trunk-jpl/src/py3/consistency/ismodelselfconsistent.py    (revision 0)
+++ ../trunk-jpl/src/py3/consistency/ismodelselfconsistent.py    (revision 19895)
@@ -0,0 +1,91 @@
+from EnumDefinitions import *
+from EnumToString import EnumToString
+
+def AnalysisConfiguration(solutiontype): #{{{
+    """
+    ANALYSISCONFIGURATION - return the types of analyses for a given solution type
+
+    Usage:
+        [analyses]=AnalysisConfiguration(solutiontype);
+    """
+
+    if solutiontype == StressbalanceSolutionEnum():
+        analyses=[StressbalanceAnalysisEnum(),StressbalanceVerticalAnalysisEnum(),StressbalanceSIAAnalysisEnum(),L2ProjectionBaseAnalysisEnum()]
+
+    elif solutiontype == SteadystateSolutionEnum():
+        analyses=[StressbalanceAnalysisEnum(),StressbalanceVerticalAnalysisEnum(),StressbalanceSIAAnalysisEnum(),L2ProjectionBaseAnalysisEnum(),ThermalAnalysisEnum(),MeltingAnalysisEnum()]
+
+    elif solutiontype == ThermalSolutionEnum():
+        analyses=[EnthalpyAnalysisEnum(),ThermalAnalysisEnum(),MeltingAnalysisEnum()]
+
+    elif solutiontype == MasstransportSolutionEnum():
+        analyses=[MasstransportAnalysisEnum()]
+
+    elif solutiontype == BalancethicknessSolutionEnum():
+        analyses=[BalancethicknessAnalysisEnum()]
+
+    elif solutiontype == SurfaceSlopeSolutionEnum():
+        analyses=[L2ProjectionBaseAnalysisEnum()]
+
+    elif solutiontype == BalancevelocitySolutionEnum():
+        analyses=[BalancevelocityAnalysisEnum()]
+
+    elif solutiontype == BedSlopeSolutionEnum():
+        analyses=[L2ProjectionBaseAnalysisEnum()]
+
+    elif solutiontype == GiaSolutionEnum():
+        analyses=[GiaAnalysisEnum()]
+
+    elif solutiontype == TransientSolutionEnum():
+        analyses=[StressbalanceAnalysisEnum(),StressbalanceVerticalAnalysisEnum(),StressbalanceSIAAnalysisEnum(),L2ProjectionBaseAnalysisEnum(),ThermalAnalysisEnum(),MeltingAnalysisEnum(),EnthalpyAnalysisEnum(),MasstransportAnalysisEnum()]
+
+    elif solutiontype == FlaimSolutionEnum():
+        analyses=[FlaimAnalysisEnum()]
+
+    elif solutiontype == HydrologySolutionEnum():
+        analyses=[L2ProjectionBaseAnalysisEnum(),HydrologyShreveAnalysisEnum(),HydrologyDCInefficientAnalysisEnum(),HydrologyDCEfficientAnalysisEnum()]
+
+    elif solutiontype == DamageEvolutionSolutionEnum():
+        analyses=[DamageEvolutionAnalysisEnum()]
+
+    else:
+        raise TypeError("solution type: '%s' not supported yet!" % EnumToString(solutiontype)[0])
+
+    return analyses
+#}}}
+
+def ismodelselfconsistent(md):
+    """
+    ISMODELSELFCONSISTENT - check that the model forms a closed, solvable problem.
+
+    Usage:
+        ismodelselfconsistent(md)
+    """
+
+    #initialize consistency as true
+    md.private.isconsistent=True
+
+    #Get solution and associated analyses
+    solution=md.private.solution
+    analyses=AnalysisConfiguration(solution)
+
+    #Go through all model fields, check that each is an object, and call its checkconsistency method
+    fields=vars(md)
+#    for field in fields.iterkeys():
+    for field in md.properties():
+
+        #Some properties do not need to be checked
+        if field in ['results','debug','radaroverlay']:
+            continue
+
+        #Check that current field is an object
+        if not hasattr(getattr(md,field),'checkconsistency'):
+            md.checkmessage("field '%s' is not an object." % field)
+
+        #Check consistency of the object
+        getattr(md,field).checkconsistency(md,solution,analyses)
+
+    #error message if the model is not consistent
+    if not md.private.isconsistent:
+        raise RuntimeError('Model not consistent, see messages above.')
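For instance, a thermal run maps to three analyses (illustrative; the enum helpers come from EnumDefinitions.py, added later in this changeset):

    analyses=AnalysisConfiguration(ThermalSolutionEnum())
    # -> [EnthalpyAnalysisEnum(), ThermalAnalysisEnum(), MeltingAnalysisEnum()]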
Index: ../trunk-jpl/src/py3/consistency/checkfield.py
===================================================================
--- ../trunk-jpl/src/py3/consistency/checkfield.py    (revision 0)
+++ ../trunk-jpl/src/py3/consistency/checkfield.py    (revision 19895)
@@ -0,0 +1,183 @@
+import numpy
+import os
+from pairoptions import pairoptions
+import MatlabFuncs as m
+
+def checkfield(md,**kwargs):
+    """
+    CHECKFIELD - check field consistency
+
+    Used to check model consistency.
+    Requires:
+        'field' or 'fieldname' option. If 'fieldname' is provided, the field is retrieved from the model md (md.fieldname).
+        If 'field' is provided, the argument following 'field' is assumed to be a numeric array.
+
+    Available options:
+        - NaN: 1 to check that there is no NaN
+        - size: [lines cols], NaN for dimensions that are not checked
+        - >: greater than provided value
+        - >=: greater than or equal to provided value
+        - <: smaller than provided value
+        - <=: smaller than or equal to provided value
+        - < vec: smaller than provided values on each vertex
+        - timeseries: 1 to check time series consistency (size and time)
+        - singletimeseries: 1 to check a single-value time series ([value;time])
+        - values: cell of strings or vector of acceptable values
+        - numel: list of acceptable numbers of elements
+        - cell: 1 to check that the field is a cell
+        - empty: 1 to check that the field is not empty
+        - file: 1 to check that the file pointed to by the field exists
+        - stringrow: 1 to check that the field is a list (row of strings)
+        - message: overloaded error message
+
+    Usage:
+        md = checkfield(md,fieldname,options);
+    """
+
+    #get options
+    options=pairoptions(**kwargs)
+
+    #get field from model
+    if options.exist('field'):
+        field=options.getfieldvalue('field')
+        fieldname=options.getfieldvalue('fieldname','no fieldname')
+    else:
+        fieldname=options.getfieldvalue('fieldname')
+        field=eval("md.%s" % fieldname)    #eval rather than exec: exec cannot rebind a function local under Python 3
+
+    if isinstance(field,(bool,int,float)):
+        field=numpy.array([field])
+
+    #check empty
+    if options.exist('empty'):
+        if not field:
+            md = md.checkmessage(options.getfieldvalue('message',\
+                "field '%s' is empty" % fieldname))
+
+    #Check size
+    if options.exist('size'):
+        fieldsize=options.getfieldvalue('size')
+        if len(fieldsize) == 1:
+            if numpy.isnan(fieldsize[0]):
+                pass
+            elif not numpy.size(field,0)==fieldsize[0]:
+                md = md.checkmessage(options.getfieldvalue('message',\
+                    "field '%s' size should be %d" % (fieldname,fieldsize[0])))
+        elif len(fieldsize) == 2:
+            if numpy.isnan(fieldsize[0]):
+                if not numpy.size(field,1)==fieldsize[1]:
+                    md = md.checkmessage(options.getfieldvalue('message',\
+                        "field '%s' should have %d columns" % (fieldname,fieldsize[1])))
+            elif numpy.isnan(fieldsize[1]):
+                if not numpy.size(field,0)==fieldsize[0]:
+                    md = md.checkmessage(options.getfieldvalue('message',\
+                        "field '%s' should have %d lines" % (fieldname,fieldsize[0])))
+            else:
+                if (not numpy.size(field,0)==fieldsize[0]) or (not numpy.size(field,1)==fieldsize[1]):
+                    md = md.checkmessage(options.getfieldvalue('message',\
+                        "field '%s' size should be %d x %d" % (fieldname,fieldsize[0],fieldsize[1])))
+
+    #Check numel
+    if options.exist('numel'):
+        fieldnumel=options.getfieldvalue('numel')
+        if numpy.size(field) not in fieldnumel:
+            if len(fieldnumel)==1:
+                md = md.checkmessage(options.getfieldvalue('message',\
+                    "field '%s' size should be %d" % (fieldname,fieldnumel[0])))
+            elif len(fieldnumel)==2:
+                md = md.checkmessage(options.getfieldvalue('message',\
+                    "field '%s' size should be %d or %d" % (fieldname,fieldnumel[0],fieldnumel[1])))
+            else:
+                md = md.checkmessage(options.getfieldvalue('message',\
+                    "field '%s' size should be %s" % (fieldname,fieldnumel)))
+
+    #check NaN
+    if options.getfieldvalue('NaN',0):
+        if numpy.any(numpy.isnan(field)):
+            md = md.checkmessage(options.getfieldvalue('message',\
+                "NaN values found in field '%s'" % fieldname))
+
+    #check cell
+    if options.getfieldvalue('cell',0):
+        if not isinstance(field,(tuple,list,dict)):
+            md = md.checkmessage(options.getfieldvalue('message',\
+                "field '%s' should be a cell" % fieldname))
+
+    #check values
+    if options.exist('values'):
+        fieldvalues=options.getfieldvalue('values')
+        if False in m.ismember(field,fieldvalues):
+            if len(fieldvalues)==1:
+                md = md.checkmessage(options.getfieldvalue('message',\
+                    "field '%s' value should be '%s'" % (fieldname,fieldvalues[0])))
+            elif len(fieldvalues)==2:
+                md = md.checkmessage(options.getfieldvalue('message',\
+                    "field '%s' values should be '%s' or '%s'" % (fieldname,fieldvalues[0],fieldvalues[1])))
+            else:
+                md = md.checkmessage(options.getfieldvalue('message',\
+                    "field '%s' should have values in %s" % (fieldname,fieldvalues)))
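Typical calls from a class's checkconsistency method might look like this (field names illustrative; the keyword arguments are collected by pairoptions):

    md = checkfield(md,fieldname='mesh.numberoflayers',numel=[1])
    md = checkfield(md,fieldname='mesh.x',NaN=1,size=[md.mesh.numberofvertices])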
+    #check greater
+    if options.exist('>='):
+        lowerbound=options.getfieldvalue('>=')
+        if numpy.any(field<lowerbound):
+            md = md.checkmessage(options.getfieldvalue('message',\
+                "field '%s' should have values above %d" % (fieldname,lowerbound)))
+    if options.exist('>'):
+        lowerbound=options.getfieldvalue('>')
+        if numpy.any(field<=lowerbound):
+            md = md.checkmessage(options.getfieldvalue('message',\
+                "field '%s' should have values above %d" % (fieldname,lowerbound)))
+
+    #check smaller
+    if options.exist('<='):
+        upperbound=options.getfieldvalue('<=')
+        if numpy.any(field>upperbound):
+            md = md.checkmessage(options.getfieldvalue('message',\
+                "field '%s' should have values below %d" % (fieldname,upperbound)))
+    if options.exist('<'):
+        upperbound=options.getfieldvalue('<')
+        if numpy.any(field>=upperbound):
+            md = md.checkmessage(options.getfieldvalue('message',\
+                "field '%s' should have values below %d" % (fieldname,upperbound)))
+
+    #check file
+    if options.getfieldvalue('file',0):
+        if not os.path.exists(field):
+            md = md.checkmessage("file provided in '%s': '%s' does not exist" % (fieldname,field))
+
+    #Check row of strings
+    if options.exist('stringrow'):
+        if not isinstance(field,list):
+            md = md.checkmessage(options.getfieldvalue('message',\
+                "field '%s' should be a list" % fieldname))
+
+    #Check forcings (size and times)
+    if options.getfieldvalue('timeseries',0):
+        if numpy.size(field,0)==md.mesh.numberofvertices:
+            if numpy.ndim(field)>1 and not numpy.size(field,1)==1:
+                md = md.checkmessage(options.getfieldvalue('message',\
+                    "field '%s' should have only one column as there are md.mesh.numberofvertices lines" % fieldname))
+        elif numpy.size(field,0)==md.mesh.numberofvertices+1 or numpy.size(field,0)==2:
+            if not all(field[-1,:]==numpy.sort(field[-1,:])):
+                md = md.checkmessage(options.getfieldvalue('message',\
+                    "field '%s' columns should be sorted chronologically" % fieldname))
+            if any(field[-1,0:-1]==field[-1,1:]):
+                md = md.checkmessage(options.getfieldvalue('message',\
+                    "field '%s' columns must not contain duplicate timesteps" % fieldname))
+        else:
+            md = md.checkmessage(options.getfieldvalue('message',\
+                "field '%s' should have md.mesh.numberofvertices or md.mesh.numberofvertices+1 lines" % fieldname))
+
+    #Check single value forcings (size and times)
+    if options.getfieldvalue('singletimeseries',0):
+        if numpy.size(field,0)==2:
+            if not all(field[-1,:]==numpy.sort(field[-1,:])):
+                md = md.checkmessage(options.getfieldvalue('message',\
+                    "field '%s' columns should be sorted chronologically" % fieldname))
+            if any(field[-1,0:-1]==field[-1,1:]):
+                md = md.checkmessage(options.getfieldvalue('message',\
+                    "field '%s' columns must not contain duplicate timesteps" % fieldname))
+        else:
+            md = md.checkmessage(options.getfieldvalue('message',\
+                "field '%s' should have 2 lines" % fieldname))
+
+    return md
+
Index: ../trunk-jpl/src/py3/miscellaneous/fielddisplay.py
===================================================================
--- ../trunk-jpl/src/py3/miscellaneous/fielddisplay.py    (revision 0)
+++ ../trunk-jpl/src/py3/miscellaneous/fielddisplay.py    (revision 19895)
@@ -0,0 +1,140 @@
+#Module import
+import numpy
+from math import isnan
+import MatlabFuncs as m
+
+def fielddisplay(md,name,comment):
+    """
+    FIELDDISPLAY - display model field
+
+    Usage:
+        fielddisplay(md,name,comment)
+    """
+
+    #get field
+    field=getattr(md,name)
+
+    #display the corresponding line as a function of field type (offset set to 9 spaces)
+    return parsedisplay("         ",name,field,comment)
+
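For example (the class and values below are illustrative; in practice md is a model object and name one of its attributes):

    class Toy(object):
        def __init__(self):
            self.numberoflayers=10
    print(fielddisplay(Toy(),'numberoflayers','number of extrusion layers'))
    #          numberoflayers         : 10              -- number of extrusion layers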
+def parsedisplay(offset,name,field,comment): # {{{ + + #string + if isinstance(field,str): + + if len(field)>30: + string=displayunit(offset,name,"not displayed",comment) + else: + string=displayunit(offset,name,"'%s'" % field,comment) + + #numeric + elif isinstance(field,(int,float)): + string=displayunit(offset,name,str(field),comment) + + #matrix + elif isinstance(field,numpy.ndarray): + string=displayunit(offset,name,str(field.shape),comment) + + #logical + elif isinstance(field,bool): + if field: + string=displayunit(offset,name,"True",comment) + else: + string=displayunit(offset,name,"False",comment) + + #dictionary + elif isinstance(field,dict): + string=dict_display(offset,name,field,comment) + + #list or tuple + elif isinstance(field,(list,tuple)): + string=list_display(offset,name,field,comment) + + #None + elif field is None: + string=displayunit(offset,name,"None",comment) + + else: + string=displayunit(offset,name,"not displayed",comment) + + return string + # }}} + +def dict_display(offset,name,field,comment): # {{{ + + if field: + string =displayunit(offset,name,'{dictionary}',comment)+'\n' + offset+=' ' + + for structure_field,sfield in field.items(): + string+=parsedisplay(offset,str(structure_field),sfield,'')+'\n' + + if string and string[-1]=='\n': + string=string[:-1] + + else: + string=displayunit(offset,name,'N/A',comment) + + return string + # }}} + +def list_display(offset,name,field,comment): # {{{ + + #initialization + if isinstance(field,list): + sbeg='[' + send=']' + elif isinstance(field,tuple): + sbeg='(' + send=')' + string=sbeg + + #go through the cell and fill string + if len(field)<5: + for fieldi in field: + if isinstance(fieldi,str): + string+="'%s'," % fieldi + elif isinstance(fieldi,(bool,int,float)): + string+="%s," % str(fieldi) + else: + string=sbeg + break + + if m.strcmp(string,sbeg): + string="%s%dx1%s" % (sbeg,len(field),send) + else: + string=string[:-1]+send + + #call displayunit + return displayunit(offset,name,string,comment) + # }}} + +def displayunit(offset,name,characterization,comment): # {{{ + + #take care of name + if len(name)>23: + name="%s..." % name[:20] + + #take care of characterization + if m.strcmp(characterization,"''") or m.strcmp(characterization,'""') or m.strcmpi(characterization,'nan'): + characterization="N/A" + + if len(characterization)>15: + characterization="%s..." % characterization[:12] + + #print + if not comment: + string="%s%-23s: %-15s" % (offset,name,characterization) + else: + if isinstance(comment,str): + string="%s%-23s: %-15s -- %s" % (offset,name,characterization,comment) + elif isinstance(comment,list): + string="%s%-23s: %-15s -- %s" % (offset,name,characterization,comment[0]) + for commenti in comment: + string+="\n%s%-23s %-15s %s" % (offset,'','',commenti) + else: + raise RuntimeError("fielddisplay error message: format for comment not supported yet") + + return string + # }}} + Index: ../trunk-jpl/src/py3/miscellaneous/parallelrange.py =================================================================== --- ../trunk-jpl/src/py3/miscellaneous/parallelrange.py (revision 0) +++ ../trunk-jpl/src/py3/miscellaneous/parallelrange.py (revision 19895) @@ -0,0 +1,25 @@ +#! /usr/bin/env python +def parallelrange(rank,numprocs,globalsize): + """ + PARALLELRANGE - from a rank, and a number of processors, figure out a range, for parallel tasks. + + Usage: + i1,i2=parallelrange(rank,numprocs,globalsize) + """ + + #We use floor. we under distribute rows. 
The rows left are then redistributed, therefore resulting in a more even distribution. + num_local_rows=[int(globalsize/numprocs) for i in range(numprocs)] + + #There may be some rows left. Distribute evenly. + row_rest=globalsize - numprocs*int(globalsize/numprocs) + + for i in range(row_rest): + num_local_rows[i]=num_local_rows[i]+1 + + i1=0 + for i in range(rank-1): + i1+=num_local_rows[i] + i2=i1+num_local_rows[rank-1]-1 + + return i1,i2 + Index: ../trunk-jpl/src/py3/miscellaneous/isnans.py =================================================================== --- ../trunk-jpl/src/py3/miscellaneous/isnans.py (revision 0) +++ ../trunk-jpl/src/py3/miscellaneous/isnans.py (revision 19895) @@ -0,0 +1,18 @@ +import numpy + +def isnans(array): + """ + ISNANS: figure out if an array is nan. wrapper to isnan from matlab which stupidly does not allow this test for structures! + + Usage: isnans(array) + + See also : ISNAN + """ + + if isinstance(array,(tuple,list,dict)): + returnvalue=0 + else: + returnvalue=numpy.isnan(array) + + return returnvalue + Index: ../trunk-jpl/src/py3/miscellaneous/MatlabFuncs.py =================================================================== --- ../trunk-jpl/src/py3/miscellaneous/MatlabFuncs.py (revision 0) +++ ../trunk-jpl/src/py3/miscellaneous/MatlabFuncs.py (revision 19895) @@ -0,0 +1,107 @@ +def oshostname(): + import socket + + return socket.gethostname() + +def ispc(): + import platform + + if 'Windows' in platform.system(): + return True + else: + return False + +def ismac(): + import platform + + if 'Darwin' in platform.system(): + return True + else: + return False + +def strcmp(s1,s2): + + if s1 == s2: + return True + else: + return False + +def strncmp(s1,s2,n): + + if s1[0:n] == s2[0:n]: + return True + else: + return False + +def strcmpi(s1,s2): + + if s1.lower() == s2.lower(): + return True + else: + return False + +def strncmpi(s1,s2,n): + + if s1.lower()[0:n] == s2.lower()[0:n]: + return True + else: + return False + +def ismember(a,s): + import numpy + + if not isinstance(s,(tuple,list,dict,numpy.ndarray)): + s=[s] + + if not isinstance(a,(tuple,list,dict,numpy.ndarray)): + a=[a] + + if not isinstance(a,numpy.ndarray): + b=[item in s for item in a] + + else: + if not isinstance(s,numpy.ndarray): + b=numpy.empty_like(a) + for i,item in enumerate(a.flat): + b.flat[i]=item in s + else: + b=numpy.in1d(a.flat,s.flat).reshape(a.shape) + + return b + +def det(a): + import numpy + + if a.shape==(1,): + return a[0] + elif a.shape==(1,1): + return a[0,0] + elif a.shape==(2,2): + return a[0,0]*a[1,1]-a[0,1]*a[1,0] + else: + raise TypeError("MatlabFunc.det only implemented for shape (2, 2), not for shape %s." % str(a.shape)) + +def sparse(ivec,jvec,svec,m=0,n=0,nzmax=0): + import numpy + + if not m: + m=numpy.max(ivec) + if not n: + n=numpy.max(jvec) + + a=numpy.zeros((m,n)) + + for i,j,s in zip(ivec.reshape(-1,order='F'),jvec.reshape(-1,order='F'),svec.reshape(-1,order='F')): + a[i-1,j-1]+=s + + return a + +def heaviside(x): + import numpy + + y=numpy.zeros_like(x) + y[numpy.nonzero(x> 0.)]=1. 
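+    # half-maximum convention at the jump: heaviside(0) is 0.5, as in Matlab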
+ y[numpy.nonzero(x==0.)]=0.5 + + return y + Index: ../trunk-jpl/src/py3/miscellaneous/PythonFuncs.py =================================================================== --- ../trunk-jpl/src/py3/miscellaneous/PythonFuncs.py (revision 0) +++ ../trunk-jpl/src/py3/miscellaneous/PythonFuncs.py (revision 19895) @@ -0,0 +1,24 @@ +def logical_and_n(*arg): + from numpy import logical_and + + if len(arg): + result=arg[0] + for item in arg[1:]: + result=logical_and(result,item) + return result + + else: + return None + +def logical_or_n(*arg): + from numpy import logical_or + + if len(arg): + result=arg[0] + for item in arg[1:]: + result=logical_or(result,item) + return result + + else: + return None + Index: ../trunk-jpl/src/py3/enum/EnumDefinitions.py =================================================================== --- ../trunk-jpl/src/py3/enum/EnumDefinitions.py (revision 0) +++ ../trunk-jpl/src/py3/enum/EnumDefinitions.py (revision 19895) @@ -0,0 +1,962 @@ +from StringToEnum import StringToEnum + +""" + +WARNING: DO NOT MODIFY THIS FILE +this file has been automatically generated by src/c/shared/Enum/Synchronize.sh +Please read src/c/shared/Enum/README for more information + +""" + +def FemModelEnum(): return StringToEnum("FemModel")[0] +def AutodiffIsautodiffEnum(): return StringToEnum("AutodiffIsautodiff")[0] +def AutodiffNumDependentsEnum(): return StringToEnum("AutodiffNumDependents")[0] +def AutodiffNumDependentObjectsEnum(): return StringToEnum("AutodiffNumDependentObjects")[0] +def AutodiffDependentObjectNamesEnum(): return StringToEnum("AutodiffDependentObjectNames")[0] +def AutodiffDependentObjectTypesEnum(): return StringToEnum("AutodiffDependentObjectTypes")[0] +def AutodiffDependentObjectIndicesEnum(): return StringToEnum("AutodiffDependentObjectIndices")[0] +def AutodiffDependentObjectsEnum(): return StringToEnum("AutodiffDependentObjects")[0] +def AutodiffNumIndependentsEnum(): return StringToEnum("AutodiffNumIndependents")[0] +def AutodiffNumIndependentObjectsEnum(): return StringToEnum("AutodiffNumIndependentObjects")[0] +def AutodiffIndependentObjectNamesEnum(): return StringToEnum("AutodiffIndependentObjectNames")[0] +def AutodiffIndependentObjectTypesEnum(): return StringToEnum("AutodiffIndependentObjectTypes")[0] +def AutodiffIndependentObjectsEnum(): return StringToEnum("AutodiffIndependentObjects")[0] +def AutodiffJacobianEnum(): return StringToEnum("AutodiffJacobian")[0] +def AutodiffXpEnum(): return StringToEnum("AutodiffXp")[0] +def AutodiffDriverEnum(): return StringToEnum("AutodiffDriver")[0] +def AutodiffFosForwardIndexEnum(): return StringToEnum("AutodiffFosForwardIndex")[0] +def AutodiffFovForwardIndicesEnum(): return StringToEnum("AutodiffFovForwardIndices")[0] +def AutodiffFosReverseIndexEnum(): return StringToEnum("AutodiffFosReverseIndex")[0] +def AutodiffMassFluxSegmentsPresentEnum(): return StringToEnum("AutodiffMassFluxSegmentsPresent")[0] +def AutodiffKeepEnum(): return StringToEnum("AutodiffKeep")[0] +def AutodiffObufsizeEnum(): return StringToEnum("AutodiffObufsize")[0] +def AutodiffLbufsizeEnum(): return StringToEnum("AutodiffLbufsize")[0] +def AutodiffCbufsizeEnum(): return StringToEnum("AutodiffCbufsize")[0] +def AutodiffTbufsizeEnum(): return StringToEnum("AutodiffTbufsize")[0] +def AutodiffGcTriggerRatioEnum(): return StringToEnum("AutodiffGcTriggerRatio")[0] +def AutodiffGcTriggerMaxSizeEnum(): return StringToEnum("AutodiffGcTriggerMaxSize")[0] +def BalancethicknessSpcthicknessEnum(): return StringToEnum("BalancethicknessSpcthickness")[0] +def 
BalancethicknessStabilizationEnum(): return StringToEnum("BalancethicknessStabilization")[0] +def BalancethicknessThickeningRateEnum(): return StringToEnum("BalancethicknessThickeningRate")[0] +def BasalforcingsEnum(): return StringToEnum("Basalforcings")[0] +def BasalforcingsGeothermalfluxEnum(): return StringToEnum("BasalforcingsGeothermalflux")[0] +def BasalforcingsGroundediceMeltingRateEnum(): return StringToEnum("BasalforcingsGroundediceMeltingRate")[0] +def BasalforcingsFloatingiceMeltingRateEnum(): return StringToEnum("BasalforcingsFloatingiceMeltingRate")[0] +def BasalforcingsDeepwaterMeltingRateEnum(): return StringToEnum("BasalforcingsDeepwaterMeltingRate")[0] +def BasalforcingsDeepwaterElevationEnum(): return StringToEnum("BasalforcingsDeepwaterElevation")[0] +def BasalforcingsUpperwaterElevationEnum(): return StringToEnum("BasalforcingsUpperwaterElevation")[0] +def BasalforcingsMeltrateFactorEnum(): return StringToEnum("BasalforcingsMeltrateFactor")[0] +def BasalforcingsThresholdThicknessEnum(): return StringToEnum("BasalforcingsThresholdThickness")[0] +def BasalforcingsUpperdepthMeltEnum(): return StringToEnum("BasalforcingsUpperdepthMelt")[0] +def FloatingMeltRateEnum(): return StringToEnum("FloatingMeltRate")[0] +def LinearFloatingMeltRateEnum(): return StringToEnum("LinearFloatingMeltRate")[0] +def MismipFloatingMeltRateEnum(): return StringToEnum("MismipFloatingMeltRate")[0] +def BedEnum(): return StringToEnum("Bed")[0] +def BaseEnum(): return StringToEnum("Base")[0] +def ConstantsGEnum(): return StringToEnum("ConstantsG")[0] +def ConstantsReferencetemperatureEnum(): return StringToEnum("ConstantsReferencetemperature")[0] +def ConstantsYtsEnum(): return StringToEnum("ConstantsYts")[0] +def DependentObjectEnum(): return StringToEnum("DependentObject")[0] +def StressbalanceAbstolEnum(): return StringToEnum("StressbalanceAbstol")[0] +def StressbalanceIsnewtonEnum(): return StringToEnum("StressbalanceIsnewton")[0] +def StressbalanceMaxiterEnum(): return StringToEnum("StressbalanceMaxiter")[0] +def StressbalancePenaltyFactorEnum(): return StringToEnum("StressbalancePenaltyFactor")[0] +def StressbalanceReferentialEnum(): return StringToEnum("StressbalanceReferential")[0] +def StressbalanceReltolEnum(): return StringToEnum("StressbalanceReltol")[0] +def StressbalanceNumRequestedOutputsEnum(): return StringToEnum("StressbalanceNumRequestedOutputs")[0] +def StressbalanceRequestedOutputsEnum(): return StringToEnum("StressbalanceRequestedOutputs")[0] +def StressbalanceRestolEnum(): return StringToEnum("StressbalanceRestol")[0] +def StressbalanceRiftPenaltyLockEnum(): return StringToEnum("StressbalanceRiftPenaltyLock")[0] +def StressbalanceRiftPenaltyThresholdEnum(): return StringToEnum("StressbalanceRiftPenaltyThreshold")[0] +def StressbalanceShelfDampeningEnum(): return StringToEnum("StressbalanceShelfDampening")[0] +def StressbalanceSpcvxEnum(): return StringToEnum("StressbalanceSpcvx")[0] +def StressbalanceSpcvyEnum(): return StringToEnum("StressbalanceSpcvy")[0] +def StressbalanceSpcvzEnum(): return StringToEnum("StressbalanceSpcvz")[0] +def StressbalanceFSreconditioningEnum(): return StringToEnum("StressbalanceFSreconditioning")[0] +def StressbalanceVertexPairingEnum(): return StringToEnum("StressbalanceVertexPairing")[0] +def StressbalanceViscosityOvershootEnum(): return StringToEnum("StressbalanceViscosityOvershoot")[0] +def LoadingforceXEnum(): return StringToEnum("LoadingforceX")[0] +def LoadingforceYEnum(): return StringToEnum("LoadingforceY")[0] +def LoadingforceZEnum(): 
return StringToEnum("LoadingforceZ")[0] +def FlowequationBorderSSAEnum(): return StringToEnum("FlowequationBorderSSA")[0] +def FlowequationBorderHOEnum(): return StringToEnum("FlowequationBorderHO")[0] +def FlowequationBorderFSEnum(): return StringToEnum("FlowequationBorderFS")[0] +def FlowequationElementEquationEnum(): return StringToEnum("FlowequationElementEquation")[0] +def FlowequationIsSIAEnum(): return StringToEnum("FlowequationIsSIA")[0] +def FlowequationIsSSAEnum(): return StringToEnum("FlowequationIsSSA")[0] +def FlowequationIsL1L2Enum(): return StringToEnum("FlowequationIsL1L2")[0] +def FlowequationIsHOEnum(): return StringToEnum("FlowequationIsHO")[0] +def FlowequationIsFSEnum(): return StringToEnum("FlowequationIsFS")[0] +def FlowequationFeSSAEnum(): return StringToEnum("FlowequationFeSSA")[0] +def FlowequationFeHOEnum(): return StringToEnum("FlowequationFeHO")[0] +def FlowequationFeFSEnum(): return StringToEnum("FlowequationFeFS")[0] +def FlowequationVertexEquationEnum(): return StringToEnum("FlowequationVertexEquation")[0] +def FrictionAsEnum(): return StringToEnum("FrictionAs")[0] +def FrictionCoefficientEnum(): return StringToEnum("FrictionCoefficient")[0] +def FrictionCoefficientcoulombEnum(): return StringToEnum("FrictionCoefficientcoulomb")[0] +def FrictionPEnum(): return StringToEnum("FrictionP")[0] +def FrictionQEnum(): return StringToEnum("FrictionQ")[0] +def FrictionMEnum(): return StringToEnum("FrictionM")[0] +def FrictionCEnum(): return StringToEnum("FrictionC")[0] +def FrictionLawEnum(): return StringToEnum("FrictionLaw")[0] +def FrictionGammaEnum(): return StringToEnum("FrictionGamma")[0] +def FrictionFEnum(): return StringToEnum("FrictionF")[0] +def FrictionWaterLayerEnum(): return StringToEnum("FrictionWaterLayer")[0] +def FrictionEffectivePressureEnum(): return StringToEnum("FrictionEffectivePressure")[0] +def FrictionCouplingEnum(): return StringToEnum("FrictionCoupling")[0] +def GeometryHydrostaticRatioEnum(): return StringToEnum("GeometryHydrostaticRatio")[0] +def HydrologyModelEnum(): return StringToEnum("HydrologyModel")[0] +def HydrologyshreveEnum(): return StringToEnum("Hydrologyshreve")[0] +def HydrologyshreveSpcwatercolumnEnum(): return StringToEnum("HydrologyshreveSpcwatercolumn")[0] +def HydrologyshreveStabilizationEnum(): return StringToEnum("HydrologyshreveStabilization")[0] +def HydrologydcEnum(): return StringToEnum("Hydrologydc")[0] +def SedimentHeadEnum(): return StringToEnum("SedimentHead")[0] +def SedimentHeadOldEnum(): return StringToEnum("SedimentHeadOld")[0] +def SedimentHeadResidualEnum(): return StringToEnum("SedimentHeadResidual")[0] +def EffectivePressureEnum(): return StringToEnum("EffectivePressure")[0] +def EplHeadEnum(): return StringToEnum("EplHead")[0] +def EplHeadOldEnum(): return StringToEnum("EplHeadOld")[0] +def EplHeadSlopeXEnum(): return StringToEnum("EplHeadSlopeX")[0] +def EplHeadSlopeYEnum(): return StringToEnum("EplHeadSlopeY")[0] +def EplZigZagCounterEnum(): return StringToEnum("EplZigZagCounter")[0] +def HydrologydcMaxIterEnum(): return StringToEnum("HydrologydcMaxIter")[0] +def HydrologydcRelTolEnum(): return StringToEnum("HydrologydcRelTol")[0] +def HydrologydcSpcsedimentHeadEnum(): return StringToEnum("HydrologydcSpcsedimentHead")[0] +def HydrologydcSedimentCompressibilityEnum(): return StringToEnum("HydrologydcSedimentCompressibility")[0] +def HydrologydcSedimentPorosityEnum(): return StringToEnum("HydrologydcSedimentPorosity")[0] +def HydrologydcSedimentThicknessEnum(): return 
StringToEnum("HydrologydcSedimentThickness")[0] +def HydrologydcSedimentTransmitivityEnum(): return StringToEnum("HydrologydcSedimentTransmitivity")[0] +def HydrologydcWaterCompressibilityEnum(): return StringToEnum("HydrologydcWaterCompressibility")[0] +def HydrologydcSpceplHeadEnum(): return StringToEnum("HydrologydcSpceplHead")[0] +def HydrologydcMaskEplactiveNodeEnum(): return StringToEnum("HydrologydcMaskEplactiveNode")[0] +def HydrologydcMaskEplactiveEltEnum(): return StringToEnum("HydrologydcMaskEplactiveElt")[0] +def HydrologydcEplCompressibilityEnum(): return StringToEnum("HydrologydcEplCompressibility")[0] +def HydrologydcEplPorosityEnum(): return StringToEnum("HydrologydcEplPorosity")[0] +def HydrologydcEplInitialThicknessEnum(): return StringToEnum("HydrologydcEplInitialThickness")[0] +def HydrologydcEplColapseThicknessEnum(): return StringToEnum("HydrologydcEplColapseThickness")[0] +def HydrologydcEplMaxThicknessEnum(): return StringToEnum("HydrologydcEplMaxThickness")[0] +def HydrologydcEplThicknessEnum(): return StringToEnum("HydrologydcEplThickness")[0] +def HydrologydcEplThicknessOldEnum(): return StringToEnum("HydrologydcEplThicknessOld")[0] +def HydrologydcEplThickCompEnum(): return StringToEnum("HydrologydcEplThickComp")[0] +def HydrologydcEplConductivityEnum(): return StringToEnum("HydrologydcEplConductivity")[0] +def HydrologydcIsefficientlayerEnum(): return StringToEnum("HydrologydcIsefficientlayer")[0] +def HydrologydcSedimentlimitFlagEnum(): return StringToEnum("HydrologydcSedimentlimitFlag")[0] +def HydrologydcSedimentlimitEnum(): return StringToEnum("HydrologydcSedimentlimit")[0] +def HydrologydcTransferFlagEnum(): return StringToEnum("HydrologydcTransferFlag")[0] +def HydrologydcLeakageFactorEnum(): return StringToEnum("HydrologydcLeakageFactor")[0] +def HydrologydcPenaltyFactorEnum(): return StringToEnum("HydrologydcPenaltyFactor")[0] +def HydrologydcPenaltyLockEnum(): return StringToEnum("HydrologydcPenaltyLock")[0] +def HydrologydcEplflipLockEnum(): return StringToEnum("HydrologydcEplflipLock")[0] +def HydrologydcBasalMoulinInputEnum(): return StringToEnum("HydrologydcBasalMoulinInput")[0] +def HydrologyLayerEnum(): return StringToEnum("HydrologyLayer")[0] +def HydrologySedimentEnum(): return StringToEnum("HydrologySediment")[0] +def HydrologyEfficientEnum(): return StringToEnum("HydrologyEfficient")[0] +def HydrologySedimentKmaxEnum(): return StringToEnum("HydrologySedimentKmax")[0] +def HydrologysommersEnum(): return StringToEnum("Hydrologysommers")[0] +def HydrologyHeadEnum(): return StringToEnum("HydrologyHead")[0] +def HydrologyGapHeightEnum(): return StringToEnum("HydrologyGapHeight")[0] +def HydrologyBumpSpacingEnum(): return StringToEnum("HydrologyBumpSpacing")[0] +def HydrologyBumpHeightEnum(): return StringToEnum("HydrologyBumpHeight")[0] +def HydrologyEnglacialInputEnum(): return StringToEnum("HydrologyEnglacialInput")[0] +def HydrologyMoulinInputEnum(): return StringToEnum("HydrologyMoulinInput")[0] +def HydrologyReynoldsEnum(): return StringToEnum("HydrologyReynolds")[0] +def HydrologyNeumannfluxEnum(): return StringToEnum("HydrologyNeumannflux")[0] +def HydrologySpcheadEnum(): return StringToEnum("HydrologySpchead")[0] +def HydrologyConductivityEnum(): return StringToEnum("HydrologyConductivity")[0] +def IndependentObjectEnum(): return StringToEnum("IndependentObject")[0] +def InversionControlParametersEnum(): return StringToEnum("InversionControlParameters")[0] +def InversionControlScalingFactorsEnum(): return 
StringToEnum("InversionControlScalingFactors")[0] +def InversionCostFunctionThresholdEnum(): return StringToEnum("InversionCostFunctionThreshold")[0] +def InversionCostFunctionsCoefficientsEnum(): return StringToEnum("InversionCostFunctionsCoefficients")[0] +def InversionCostFunctionsEnum(): return StringToEnum("InversionCostFunctions")[0] +def InversionGradientScalingEnum(): return StringToEnum("InversionGradientScaling")[0] +def InversionIscontrolEnum(): return StringToEnum("InversionIscontrol")[0] +def InversionTypeEnum(): return StringToEnum("InversionType")[0] +def InversionIncompleteAdjointEnum(): return StringToEnum("InversionIncompleteAdjoint")[0] +def InversionMaxParametersEnum(): return StringToEnum("InversionMaxParameters")[0] +def InversionMaxiterPerStepEnum(): return StringToEnum("InversionMaxiterPerStep")[0] +def InversionMaxiterEnum(): return StringToEnum("InversionMaxiter")[0] +def InversionMaxstepsEnum(): return StringToEnum("InversionMaxsteps")[0] +def InversionFatolEnum(): return StringToEnum("InversionFatol")[0] +def InversionFrtolEnum(): return StringToEnum("InversionFrtol")[0] +def InversionGatolEnum(): return StringToEnum("InversionGatol")[0] +def InversionGrtolEnum(): return StringToEnum("InversionGrtol")[0] +def InversionGttolEnum(): return StringToEnum("InversionGttol")[0] +def InversionAlgorithmEnum(): return StringToEnum("InversionAlgorithm")[0] +def InversionMinParametersEnum(): return StringToEnum("InversionMinParameters")[0] +def InversionNstepsEnum(): return StringToEnum("InversionNsteps")[0] +def InversionDxminEnum(): return StringToEnum("InversionDxmin")[0] +def InversionNumControlParametersEnum(): return StringToEnum("InversionNumControlParameters")[0] +def InversionNumCostFunctionsEnum(): return StringToEnum("InversionNumCostFunctions")[0] +def InversionStepThresholdEnum(): return StringToEnum("InversionStepThreshold")[0] +def InversionThicknessObsEnum(): return StringToEnum("InversionThicknessObs")[0] +def InversionSurfaceObsEnum(): return StringToEnum("InversionSurfaceObs")[0] +def InversionVxObsEnum(): return StringToEnum("InversionVxObs")[0] +def InversionVyObsEnum(): return StringToEnum("InversionVyObs")[0] +def InversionVzObsEnum(): return StringToEnum("InversionVzObs")[0] +def MaskIceLevelsetEnum(): return StringToEnum("MaskIceLevelset")[0] +def MaterialsBetaEnum(): return StringToEnum("MaterialsBeta")[0] +def MaterialsHeatcapacityEnum(): return StringToEnum("MaterialsHeatcapacity")[0] +def MaterialsLatentheatEnum(): return StringToEnum("MaterialsLatentheat")[0] +def MaterialsMeltingpointEnum(): return StringToEnum("MaterialsMeltingpoint")[0] +def MaterialsMixedLayerCapacityEnum(): return StringToEnum("MaterialsMixedLayerCapacity")[0] +def MaterialsRheologyBEnum(): return StringToEnum("MaterialsRheologyB")[0] +def MaterialsRheologyBbarEnum(): return StringToEnum("MaterialsRheologyBbar")[0] +def MaterialsRheologyLawEnum(): return StringToEnum("MaterialsRheologyLaw")[0] +def MaterialsRheologyNEnum(): return StringToEnum("MaterialsRheologyN")[0] +def DamageIsdamageEnum(): return StringToEnum("DamageIsdamage")[0] +def DamageDEnum(): return StringToEnum("DamageD")[0] +def DamageFEnum(): return StringToEnum("DamageF")[0] +def DamageDbarEnum(): return StringToEnum("DamageDbar")[0] +def DamageLawEnum(): return StringToEnum("DamageLaw")[0] +def DamageC1Enum(): return StringToEnum("DamageC1")[0] +def DamageC2Enum(): return StringToEnum("DamageC2")[0] +def DamageC3Enum(): return StringToEnum("DamageC3")[0] +def DamageC4Enum(): return 
StringToEnum("DamageC4")[0] +def DamageElementinterpEnum(): return StringToEnum("DamageElementinterp")[0] +def DamageHealingEnum(): return StringToEnum("DamageHealing")[0] +def DamageStressThresholdEnum(): return StringToEnum("DamageStressThreshold")[0] +def DamageKappaEnum(): return StringToEnum("DamageKappa")[0] +def DamageStabilizationEnum(): return StringToEnum("DamageStabilization")[0] +def DamageMaxiterEnum(): return StringToEnum("DamageMaxiter")[0] +def DamageSpcdamageEnum(): return StringToEnum("DamageSpcdamage")[0] +def DamageMaxDamageEnum(): return StringToEnum("DamageMaxDamage")[0] +def DamageEquivStressEnum(): return StringToEnum("DamageEquivStress")[0] +def DamageEvolutionNumRequestedOutputsEnum(): return StringToEnum("DamageEvolutionNumRequestedOutputs")[0] +def DamageEvolutionRequestedOutputsEnum(): return StringToEnum("DamageEvolutionRequestedOutputs")[0] +def DamageEnum(): return StringToEnum("Damage")[0] +def NewDamageEnum(): return StringToEnum("NewDamage")[0] +def StressIntensityFactorEnum(): return StringToEnum("StressIntensityFactor")[0] +def CalvingLawEnum(): return StringToEnum("CalvingLaw")[0] +def CalvingCalvingrateEnum(): return StringToEnum("CalvingCalvingrate")[0] +def CalvingMeltingrateEnum(): return StringToEnum("CalvingMeltingrate")[0] +def CalvingLevermannEnum(): return StringToEnum("CalvingLevermann")[0] +def CalvingPiEnum(): return StringToEnum("CalvingPi")[0] +def CalvingDevEnum(): return StringToEnum("CalvingDev")[0] +def DefaultCalvingEnum(): return StringToEnum("DefaultCalving")[0] +def CalvingRequestedOutputsEnum(): return StringToEnum("CalvingRequestedOutputs")[0] +def CalvinglevermannCoeffEnum(): return StringToEnum("CalvinglevermannCoeff")[0] +def CalvinglevermannMeltingrateEnum(): return StringToEnum("CalvinglevermannMeltingrate")[0] +def CalvingpiCoeffEnum(): return StringToEnum("CalvingpiCoeff")[0] +def CalvingpiMeltingrateEnum(): return StringToEnum("CalvingpiMeltingrate")[0] +def CalvingratexEnum(): return StringToEnum("Calvingratex")[0] +def CalvingrateyEnum(): return StringToEnum("Calvingratey")[0] +def CalvingratexAverageEnum(): return StringToEnum("CalvingratexAverage")[0] +def CalvingrateyAverageEnum(): return StringToEnum("CalvingrateyAverage")[0] +def StrainRateparallelEnum(): return StringToEnum("StrainRateparallel")[0] +def StrainRateperpendicularEnum(): return StringToEnum("StrainRateperpendicular")[0] +def StrainRateeffectiveEnum(): return StringToEnum("StrainRateeffective")[0] +def MaterialsRhoIceEnum(): return StringToEnum("MaterialsRhoIce")[0] +def MaterialsRhoSeawaterEnum(): return StringToEnum("MaterialsRhoSeawater")[0] +def MaterialsRhoFreshwaterEnum(): return StringToEnum("MaterialsRhoFreshwater")[0] +def MaterialsMuWaterEnum(): return StringToEnum("MaterialsMuWater")[0] +def MaterialsThermalExchangeVelocityEnum(): return StringToEnum("MaterialsThermalExchangeVelocity")[0] +def MaterialsThermalconductivityEnum(): return StringToEnum("MaterialsThermalconductivity")[0] +def MaterialsTemperateiceconductivityEnum(): return StringToEnum("MaterialsTemperateiceconductivity")[0] +def MaterialsLithosphereShearModulusEnum(): return StringToEnum("MaterialsLithosphereShearModulus")[0] +def MaterialsLithosphereDensityEnum(): return StringToEnum("MaterialsLithosphereDensity")[0] +def MaterialsMantleShearModulusEnum(): return StringToEnum("MaterialsMantleShearModulus")[0] +def MaterialsMantleDensityEnum(): return StringToEnum("MaterialsMantleDensity")[0] +def MeshAverageVertexConnectivityEnum(): return 
StringToEnum("MeshAverageVertexConnectivity")[0] +def MeshElements2dEnum(): return StringToEnum("MeshElements2d")[0] +def MeshElementsEnum(): return StringToEnum("MeshElements")[0] +def MeshLowerelementsEnum(): return StringToEnum("MeshLowerelements")[0] +def MeshNumberofelements2dEnum(): return StringToEnum("MeshNumberofelements2d")[0] +def MeshNumberofelementsEnum(): return StringToEnum("MeshNumberofelements")[0] +def MeshNumberoflayersEnum(): return StringToEnum("MeshNumberoflayers")[0] +def MeshNumberofvertices2dEnum(): return StringToEnum("MeshNumberofvertices2d")[0] +def MeshNumberofverticesEnum(): return StringToEnum("MeshNumberofvertices")[0] +def MeshUpperelementsEnum(): return StringToEnum("MeshUpperelements")[0] +def MeshVertexonbaseEnum(): return StringToEnum("MeshVertexonbase")[0] +def MeshVertexonsurfaceEnum(): return StringToEnum("MeshVertexonsurface")[0] +def MeshVertexonboundaryEnum(): return StringToEnum("MeshVertexonboundary")[0] +def MeshXEnum(): return StringToEnum("MeshX")[0] +def MeshYEnum(): return StringToEnum("MeshY")[0] +def MeshZEnum(): return StringToEnum("MeshZ")[0] +def MeshElementtypeEnum(): return StringToEnum("MeshElementtype")[0] +def MeshSegmentsEnum(): return StringToEnum("MeshSegments")[0] +def DomainTypeEnum(): return StringToEnum("DomainType")[0] +def DomainDimensionEnum(): return StringToEnum("DomainDimension")[0] +def Domain2DhorizontalEnum(): return StringToEnum("Domain2Dhorizontal")[0] +def Domain2DverticalEnum(): return StringToEnum("Domain2Dvertical")[0] +def Domain3DEnum(): return StringToEnum("Domain3D")[0] +def MiscellaneousNameEnum(): return StringToEnum("MiscellaneousName")[0] +def MasstransportHydrostaticAdjustmentEnum(): return StringToEnum("MasstransportHydrostaticAdjustment")[0] +def MasstransportIsfreesurfaceEnum(): return StringToEnum("MasstransportIsfreesurface")[0] +def MasstransportMinThicknessEnum(): return StringToEnum("MasstransportMinThickness")[0] +def MasstransportPenaltyFactorEnum(): return StringToEnum("MasstransportPenaltyFactor")[0] +def MasstransportSpcthicknessEnum(): return StringToEnum("MasstransportSpcthickness")[0] +def MasstransportStabilizationEnum(): return StringToEnum("MasstransportStabilization")[0] +def MasstransportVertexPairingEnum(): return StringToEnum("MasstransportVertexPairing")[0] +def MasstransportNumRequestedOutputsEnum(): return StringToEnum("MasstransportNumRequestedOutputs")[0] +def MasstransportRequestedOutputsEnum(): return StringToEnum("MasstransportRequestedOutputs")[0] +def QmuIsdakotaEnum(): return StringToEnum("QmuIsdakota")[0] +def MassFluxSegmentsEnum(): return StringToEnum("MassFluxSegments")[0] +def MassFluxSegmentsPresentEnum(): return StringToEnum("MassFluxSegmentsPresent")[0] +def QmuMassFluxSegmentsPresentEnum(): return StringToEnum("QmuMassFluxSegmentsPresent")[0] +def QmuNumberofpartitionsEnum(): return StringToEnum("QmuNumberofpartitions")[0] +def QmuNumberofresponsesEnum(): return StringToEnum("QmuNumberofresponses")[0] +def QmuPartitionEnum(): return StringToEnum("QmuPartition")[0] +def QmuResponsedescriptorsEnum(): return StringToEnum("QmuResponsedescriptors")[0] +def QmuVariabledescriptorsEnum(): return StringToEnum("QmuVariabledescriptors")[0] +def RiftsNumriftsEnum(): return StringToEnum("RiftsNumrifts")[0] +def RiftsRiftstructEnum(): return StringToEnum("RiftsRiftstruct")[0] +def SettingsResultsOnNodesEnum(): return StringToEnum("SettingsResultsOnNodes")[0] +def SettingsIoGatherEnum(): return StringToEnum("SettingsIoGather")[0] +def SettingsLowmemEnum(): return 
StringToEnum("SettingsLowmem")[0] +def SettingsOutputFrequencyEnum(): return StringToEnum("SettingsOutputFrequency")[0] +def SettingsRecordingFrequencyEnum(): return StringToEnum("SettingsRecordingFrequency")[0] +def SettingsWaitonlockEnum(): return StringToEnum("SettingsWaitonlock")[0] +def DebugProfilingEnum(): return StringToEnum("DebugProfiling")[0] +def ProfilingCurrentMemEnum(): return StringToEnum("ProfilingCurrentMem")[0] +def ProfilingCurrentFlopsEnum(): return StringToEnum("ProfilingCurrentFlops")[0] +def ProfilingSolutionTimeEnum(): return StringToEnum("ProfilingSolutionTime")[0] +def SteadystateMaxiterEnum(): return StringToEnum("SteadystateMaxiter")[0] +def SteadystateNumRequestedOutputsEnum(): return StringToEnum("SteadystateNumRequestedOutputs")[0] +def SteadystateReltolEnum(): return StringToEnum("SteadystateReltol")[0] +def SteadystateRequestedOutputsEnum(): return StringToEnum("SteadystateRequestedOutputs")[0] +def SurfaceEnum(): return StringToEnum("Surface")[0] +def ThermalIsenthalpyEnum(): return StringToEnum("ThermalIsenthalpy")[0] +def ThermalIsdynamicbasalspcEnum(): return StringToEnum("ThermalIsdynamicbasalspc")[0] +def ThermalReltolEnum(): return StringToEnum("ThermalReltol")[0] +def ThermalMaxiterEnum(): return StringToEnum("ThermalMaxiter")[0] +def ThermalPenaltyFactorEnum(): return StringToEnum("ThermalPenaltyFactor")[0] +def ThermalPenaltyLockEnum(): return StringToEnum("ThermalPenaltyLock")[0] +def ThermalPenaltyThresholdEnum(): return StringToEnum("ThermalPenaltyThreshold")[0] +def ThermalSpctemperatureEnum(): return StringToEnum("ThermalSpctemperature")[0] +def ThermalStabilizationEnum(): return StringToEnum("ThermalStabilization")[0] +def ThermalNumRequestedOutputsEnum(): return StringToEnum("ThermalNumRequestedOutputs")[0] +def ThermalRequestedOutputsEnum(): return StringToEnum("ThermalRequestedOutputs")[0] +def GiaMantleViscosityEnum(): return StringToEnum("GiaMantleViscosity")[0] +def GiaLithosphereThicknessEnum(): return StringToEnum("GiaLithosphereThickness")[0] +def ThicknessEnum(): return StringToEnum("Thickness")[0] +def TimesteppingStartTimeEnum(): return StringToEnum("TimesteppingStartTime")[0] +def TimesteppingFinalTimeEnum(): return StringToEnum("TimesteppingFinalTime")[0] +def TimesteppingCflCoefficientEnum(): return StringToEnum("TimesteppingCflCoefficient")[0] +def TimesteppingTimeAdaptEnum(): return StringToEnum("TimesteppingTimeAdapt")[0] +def TimesteppingTimeStepEnum(): return StringToEnum("TimesteppingTimeStep")[0] +def TimesteppingInterpForcingsEnum(): return StringToEnum("TimesteppingInterpForcings")[0] +def TransientIssmbEnum(): return StringToEnum("TransientIssmb")[0] +def TransientIsstressbalanceEnum(): return StringToEnum("TransientIsstressbalance")[0] +def TransientIsgroundinglineEnum(): return StringToEnum("TransientIsgroundingline")[0] +def TransientIsmasstransportEnum(): return StringToEnum("TransientIsmasstransport")[0] +def TransientIsthermalEnum(): return StringToEnum("TransientIsthermal")[0] +def TransientIsgiaEnum(): return StringToEnum("TransientIsgia")[0] +def TransientIsdamageevolutionEnum(): return StringToEnum("TransientIsdamageevolution")[0] +def TransientIshydrologyEnum(): return StringToEnum("TransientIshydrology")[0] +def TransientIscalvingEnum(): return StringToEnum("TransientIscalving")[0] +def TransientNumRequestedOutputsEnum(): return StringToEnum("TransientNumRequestedOutputs")[0] +def TransientRequestedOutputsEnum(): return StringToEnum("TransientRequestedOutputs")[0] +def PotentialEnum(): return 
StringToEnum("Potential")[0] +def BalancethicknessSpcpotentialEnum(): return StringToEnum("BalancethicknessSpcpotential")[0] +def BalancethicknessApparentMassbalanceEnum(): return StringToEnum("BalancethicknessApparentMassbalance")[0] +def Balancethickness2MisfitEnum(): return StringToEnum("Balancethickness2Misfit")[0] +def BalancethicknessDiffusionCoefficientEnum(): return StringToEnum("BalancethicknessDiffusionCoefficient")[0] +def BalancethicknessCmuEnum(): return StringToEnum("BalancethicknessCmu")[0] +def BalancethicknessOmegaEnum(): return StringToEnum("BalancethicknessOmega")[0] +def BalancethicknessD0Enum(): return StringToEnum("BalancethicknessD0")[0] +def SmbEnum(): return StringToEnum("Smb")[0] +def SmbAnalysisEnum(): return StringToEnum("SmbAnalysis")[0] +def SmbSolutionEnum(): return StringToEnum("SmbSolution")[0] +def SmbNumRequestedOutputsEnum(): return StringToEnum("SmbNumRequestedOutputs")[0] +def SmbRequestedOutputsEnum(): return StringToEnum("SmbRequestedOutputs")[0] +def SmbIsInitializedEnum(): return StringToEnum("SmbIsInitialized")[0] +def SMBforcingEnum(): return StringToEnum("SMBforcing")[0] +def SmbMassBalanceEnum(): return StringToEnum("SmbMassBalance")[0] +def SMBgembEnum(): return StringToEnum("SMBgemb")[0] +def SmbInitDensityScalingEnum(): return StringToEnum("SmbInitDensityScaling")[0] +def SmbTaEnum(): return StringToEnum("SmbTa")[0] +def SmbVEnum(): return StringToEnum("SmbV")[0] +def SmbDswrfEnum(): return StringToEnum("SmbDswrf")[0] +def SmbDlwrfEnum(): return StringToEnum("SmbDlwrf")[0] +def SmbPEnum(): return StringToEnum("SmbP")[0] +def SmbSwfEnum(): return StringToEnum("SmbSwf")[0] +def SmbEAirEnum(): return StringToEnum("SmbEAir")[0] +def SmbPAirEnum(): return StringToEnum("SmbPAir")[0] +def SmbTmeanEnum(): return StringToEnum("SmbTmean")[0] +def SmbCEnum(): return StringToEnum("SmbC")[0] +def SmbTzEnum(): return StringToEnum("SmbTz")[0] +def SmbVzEnum(): return StringToEnum("SmbVz")[0] +def SmbDtEnum(): return StringToEnum("SmbDt")[0] +def SmbDzEnum(): return StringToEnum("SmbDz")[0] +def SmbAIdxEnum(): return StringToEnum("SmbAIdx")[0] +def SmbSwIdxEnum(): return StringToEnum("SmbSwIdx")[0] +def SmbDenIdxEnum(): return StringToEnum("SmbDenIdx")[0] +def SmbZTopEnum(): return StringToEnum("SmbZTop")[0] +def SmbDzTopEnum(): return StringToEnum("SmbDzTop")[0] +def SmbDzMinEnum(): return StringToEnum("SmbDzMin")[0] +def SmbZYEnum(): return StringToEnum("SmbZY")[0] +def SmbZMaxEnum(): return StringToEnum("SmbZMax")[0] +def SmbZMinEnum(): return StringToEnum("SmbZMin")[0] +def SmbOutputFreqEnum(): return StringToEnum("SmbOutputFreq")[0] +def SmbASnowEnum(): return StringToEnum("SmbASnow")[0] +def SmbAIceEnum(): return StringToEnum("SmbAIce")[0] +def SmbCldFracEnum(): return StringToEnum("SmbCldFrac")[0] +def SmbT0wetEnum(): return StringToEnum("SmbT0wet")[0] +def SmbT0dryEnum(): return StringToEnum("SmbT0dry")[0] +def SmbKEnum(): return StringToEnum("SmbK")[0] +def SmbDEnum(): return StringToEnum("SmbD")[0] +def SmbReEnum(): return StringToEnum("SmbRe")[0] +def SmbGdnEnum(): return StringToEnum("SmbGdn")[0] +def SmbGspEnum(): return StringToEnum("SmbGsp")[0] +def SmbECEnum(): return StringToEnum("SmbEC")[0] +def SmbCondensationEnum(): return StringToEnum("SmbCondensation")[0] +def SmbWEnum(): return StringToEnum("SmbW")[0] +def SmbAEnum(): return StringToEnum("SmbA")[0] +def SmbTEnum(): return StringToEnum("SmbT")[0] +def SmbIsgraingrowthEnum(): return StringToEnum("SmbIsgraingrowth")[0] +def SmbIsalbedoEnum(): return StringToEnum("SmbIsalbedo")[0] +def 
SmbIsshortwaveEnum(): return StringToEnum("SmbIsshortwave")[0] +def SmbIsthermalEnum(): return StringToEnum("SmbIsthermal")[0] +def SmbIsaccumulationEnum(): return StringToEnum("SmbIsaccumulation")[0] +def SmbIsmeltEnum(): return StringToEnum("SmbIsmelt")[0] +def SmbIsdensificationEnum(): return StringToEnum("SmbIsdensification")[0] +def SmbIsturbulentfluxEnum(): return StringToEnum("SmbIsturbulentflux")[0] +def SMBpddEnum(): return StringToEnum("SMBpdd")[0] +def SmbDelta18oEnum(): return StringToEnum("SmbDelta18o")[0] +def SmbDelta18oSurfaceEnum(): return StringToEnum("SmbDelta18oSurface")[0] +def SmbIsdelta18oEnum(): return StringToEnum("SmbIsdelta18o")[0] +def SmbIsmungsmEnum(): return StringToEnum("SmbIsmungsm")[0] +def SmbIsd18opdEnum(): return StringToEnum("SmbIsd18opd")[0] +def SmbPrecipitationsPresentdayEnum(): return StringToEnum("SmbPrecipitationsPresentday")[0] +def SmbPrecipitationsLgmEnum(): return StringToEnum("SmbPrecipitationsLgm")[0] +def SmbTemperaturesPresentdayEnum(): return StringToEnum("SmbTemperaturesPresentday")[0] +def SmbTemperaturesLgmEnum(): return StringToEnum("SmbTemperaturesLgm")[0] +def SmbPrecipitationEnum(): return StringToEnum("SmbPrecipitation")[0] +def SmbDesfacEnum(): return StringToEnum("SmbDesfac")[0] +def SmbS0pEnum(): return StringToEnum("SmbS0p")[0] +def SmbS0tEnum(): return StringToEnum("SmbS0t")[0] +def SmbRlapsEnum(): return StringToEnum("SmbRlaps")[0] +def SmbRlapslgmEnum(): return StringToEnum("SmbRlapslgm")[0] +def SmbPfacEnum(): return StringToEnum("SmbPfac")[0] +def SmbTdiffEnum(): return StringToEnum("SmbTdiff")[0] +def SmbSealevEnum(): return StringToEnum("SmbSealev")[0] +def SMBd18opddEnum(): return StringToEnum("SMBd18opdd")[0] +def SmbDpermilEnum(): return StringToEnum("SmbDpermil")[0] +def SMBgradientsEnum(): return StringToEnum("SMBgradients")[0] +def SmbMonthlytemperaturesEnum(): return StringToEnum("SmbMonthlytemperatures")[0] +def SmbHrefEnum(): return StringToEnum("SmbHref")[0] +def SmbSmbrefEnum(): return StringToEnum("SmbSmbref")[0] +def SmbBPosEnum(): return StringToEnum("SmbBPos")[0] +def SmbBNegEnum(): return StringToEnum("SmbBNeg")[0] +def SMBhenningEnum(): return StringToEnum("SMBhenning")[0] +def SMBcomponentsEnum(): return StringToEnum("SMBcomponents")[0] +def SmbAccumulationEnum(): return StringToEnum("SmbAccumulation")[0] +def SmbEvaporationEnum(): return StringToEnum("SmbEvaporation")[0] +def SmbRunoffEnum(): return StringToEnum("SmbRunoff")[0] +def SMBmeltcomponentsEnum(): return StringToEnum("SMBmeltcomponents")[0] +def SmbMeltEnum(): return StringToEnum("SmbMelt")[0] +def SmbRefreezeEnum(): return StringToEnum("SmbRefreeze")[0] +def SMBgcmEnum(): return StringToEnum("SMBgcm")[0] +def SmbIspddEnum(): return StringToEnum("SmbIspdd")[0] +def SmbIssmbgradientsEnum(): return StringToEnum("SmbIssmbgradients")[0] +def SolutionTypeEnum(): return StringToEnum("SolutionType")[0] +def AnalysisTypeEnum(): return StringToEnum("AnalysisType")[0] +def ConfigurationTypeEnum(): return StringToEnum("ConfigurationType")[0] +def AdjointBalancethicknessAnalysisEnum(): return StringToEnum("AdjointBalancethicknessAnalysis")[0] +def AdjointBalancethickness2AnalysisEnum(): return StringToEnum("AdjointBalancethickness2Analysis")[0] +def AdjointHorizAnalysisEnum(): return StringToEnum("AdjointHorizAnalysis")[0] +def AnalysisCounterEnum(): return StringToEnum("AnalysisCounter")[0] +def DefaultAnalysisEnum(): return StringToEnum("DefaultAnalysis")[0] +def BalancethicknessAnalysisEnum(): return StringToEnum("BalancethicknessAnalysis")[0] +def 
BalancethicknessSolutionEnum(): return StringToEnum("BalancethicknessSolution")[0]
+def Balancethickness2AnalysisEnum(): return StringToEnum("Balancethickness2Analysis")[0]
+def Balancethickness2SolutionEnum(): return StringToEnum("Balancethickness2Solution")[0]
+def BalancethicknessSoftAnalysisEnum(): return StringToEnum("BalancethicknessSoftAnalysis")[0]
+def BalancethicknessSoftSolutionEnum(): return StringToEnum("BalancethicknessSoftSolution")[0]
+def BalancevelocityAnalysisEnum(): return StringToEnum("BalancevelocityAnalysis")[0]
+def BalancevelocitySolutionEnum(): return StringToEnum("BalancevelocitySolution")[0]
+def L2ProjectionEPLAnalysisEnum(): return StringToEnum("L2ProjectionEPLAnalysis")[0]
+def L2ProjectionBaseAnalysisEnum(): return StringToEnum("L2ProjectionBaseAnalysis")[0]
+def BedSlopeSolutionEnum(): return StringToEnum("BedSlopeSolution")[0]
+def DamageEvolutionSolutionEnum(): return StringToEnum("DamageEvolutionSolution")[0]
+def DamageEvolutionAnalysisEnum(): return StringToEnum("DamageEvolutionAnalysis")[0]
+def StressbalanceAnalysisEnum(): return StringToEnum("StressbalanceAnalysis")[0]
+def StressbalanceSIAAnalysisEnum(): return StringToEnum("StressbalanceSIAAnalysis")[0]
+def StressbalanceSolutionEnum(): return StringToEnum("StressbalanceSolution")[0]
+def StressbalanceVerticalAnalysisEnum(): return StringToEnum("StressbalanceVerticalAnalysis")[0]
+def EnthalpyAnalysisEnum(): return StringToEnum("EnthalpyAnalysis")[0]
+def FlaimAnalysisEnum(): return StringToEnum("FlaimAnalysis")[0]
+def FlaimSolutionEnum(): return StringToEnum("FlaimSolution")[0]
+def HydrologyShreveAnalysisEnum(): return StringToEnum("HydrologyShreveAnalysis")[0]
+def HydrologyDCInefficientAnalysisEnum(): return StringToEnum("HydrologyDCInefficientAnalysis")[0]
+def HydrologyDCEfficientAnalysisEnum(): return StringToEnum("HydrologyDCEfficientAnalysis")[0]
+def HydrologySommersAnalysisEnum(): return StringToEnum("HydrologySommersAnalysis")[0]
+def HydrologySolutionEnum(): return StringToEnum("HydrologySolution")[0]
+def MeltingAnalysisEnum(): return StringToEnum("MeltingAnalysis")[0]
+def MasstransportAnalysisEnum(): return StringToEnum("MasstransportAnalysis")[0]
+def MasstransportSolutionEnum(): return StringToEnum("MasstransportSolution")[0]
+def FreeSurfaceBaseAnalysisEnum(): return StringToEnum("FreeSurfaceBaseAnalysis")[0]
+def FreeSurfaceTopAnalysisEnum(): return StringToEnum("FreeSurfaceTopAnalysis")[0]
+def SurfaceNormalVelocityEnum(): return StringToEnum("SurfaceNormalVelocity")[0]
+def ExtrudeFromBaseAnalysisEnum(): return StringToEnum("ExtrudeFromBaseAnalysis")[0]
+def ExtrudeFromTopAnalysisEnum(): return StringToEnum("ExtrudeFromTopAnalysis")[0]
+def DepthAverageAnalysisEnum(): return StringToEnum("DepthAverageAnalysis")[0]
+def SteadystateSolutionEnum(): return StringToEnum("SteadystateSolution")[0]
+def SurfaceSlopeSolutionEnum(): return StringToEnum("SurfaceSlopeSolution")[0]
+def SmoothAnalysisEnum(): return StringToEnum("SmoothAnalysis")[0]
+def ThermalAnalysisEnum(): return StringToEnum("ThermalAnalysis")[0]
+def ThermalSolutionEnum(): return StringToEnum("ThermalSolution")[0]
+def TransientSolutionEnum(): return StringToEnum("TransientSolution")[0]
+def UzawaPressureAnalysisEnum(): return StringToEnum("UzawaPressureAnalysis")[0]
+def GiaSolutionEnum(): return StringToEnum("GiaSolution")[0]
+def GiaAnalysisEnum(): return StringToEnum("GiaAnalysis")[0]
+def MeshdeformationSolutionEnum(): return StringToEnum("MeshdeformationSolution")[0]
+def MeshdeformationAnalysisEnum(): return StringToEnum("MeshdeformationAnalysis")[0]
+def LevelsetAnalysisEnum(): return StringToEnum("LevelsetAnalysis")[0]
+def LevelsetStabilizationEnum(): return StringToEnum("LevelsetStabilization")[0]
+def ExtrapolationAnalysisEnum(): return StringToEnum("ExtrapolationAnalysis")[0]
+def LsfReinitializationAnalysisEnum(): return StringToEnum("LsfReinitializationAnalysis")[0]
+def ApproximationEnum(): return StringToEnum("Approximation")[0]
+def NoneApproximationEnum(): return StringToEnum("NoneApproximation")[0]
+def SIAApproximationEnum(): return StringToEnum("SIAApproximation")[0]
+def SSAApproximationEnum(): return StringToEnum("SSAApproximation")[0]
+def SSAHOApproximationEnum(): return StringToEnum("SSAHOApproximation")[0]
+def SSAFSApproximationEnum(): return StringToEnum("SSAFSApproximation")[0]
+def L1L2ApproximationEnum(): return StringToEnum("L1L2Approximation")[0]
+def HOApproximationEnum(): return StringToEnum("HOApproximation")[0]
+def HOFSApproximationEnum(): return StringToEnum("HOFSApproximation")[0]
+def FSApproximationEnum(): return StringToEnum("FSApproximation")[0]
+def FSvelocityEnum(): return StringToEnum("FSvelocity")[0]
+def FSpressureEnum(): return StringToEnum("FSpressure")[0]
+def DataSetEnum(): return StringToEnum("DataSet")[0]
+def ConstraintsEnum(): return StringToEnum("Constraints")[0]
+def LoadsEnum(): return StringToEnum("Loads")[0]
+def MaterialsEnum(): return StringToEnum("Materials")[0]
+def NodesEnum(): return StringToEnum("Nodes")[0]
+def ContoursEnum(): return StringToEnum("Contours")[0]
+def ParametersEnum(): return StringToEnum("Parameters")[0]
+def VerticesEnum(): return StringToEnum("Vertices")[0]
+def ResultsEnum(): return StringToEnum("Results")[0]
+def GenericParamEnum(): return StringToEnum("GenericParam")[0]
+def AdolcParamEnum(): return StringToEnum("AdolcParam")[0]
+def BoolInputEnum(): return StringToEnum("BoolInput")[0]
+def BoolParamEnum(): return StringToEnum("BoolParam")[0]
+def ContourEnum(): return StringToEnum("Contour")[0]
+def ControlInputEnum(): return StringToEnum("ControlInput")[0]
+def DatasetInputEnum(): return StringToEnum("DatasetInput")[0]
+def DoubleInputEnum(): return StringToEnum("DoubleInput")[0]
+def DoubleArrayInputEnum(): return StringToEnum("DoubleArrayInput")[0]
+def DataSetParamEnum(): return StringToEnum("DataSetParam")[0]
+def DoubleMatArrayParamEnum(): return StringToEnum("DoubleMatArrayParam")[0]
+def DoubleMatParamEnum(): return StringToEnum("DoubleMatParam")[0]
+def DoubleParamEnum(): return StringToEnum("DoubleParam")[0]
+def DoubleVecParamEnum(): return StringToEnum("DoubleVecParam")[0]
+def ElementEnum(): return StringToEnum("Element")[0]
+def ElementHookEnum(): return StringToEnum("ElementHook")[0]
+def HookEnum(): return StringToEnum("Hook")[0]
+def ExternalResultEnum(): return StringToEnum("ExternalResult")[0]
+def FileParamEnum(): return StringToEnum("FileParam")[0]
+def InputEnum(): return StringToEnum("Input")[0]
+def IntInputEnum(): return StringToEnum("IntInput")[0]
+def InputToExtrudeEnum(): return StringToEnum("InputToExtrude")[0]
+def InputToL2ProjectEnum(): return StringToEnum("InputToL2Project")[0]
+def InputToDepthaverageEnum(): return StringToEnum("InputToDepthaverage")[0]
+def InputToSmoothEnum(): return StringToEnum("InputToSmooth")[0]
+def SmoothThicknessMultiplierEnum(): return StringToEnum("SmoothThicknessMultiplier")[0]
+def IntParamEnum(): return StringToEnum("IntParam")[0]
+def IntVecParamEnum(): return StringToEnum("IntVecParam")[0]
+def TransientParamEnum(): return StringToEnum("TransientParam")[0]
+def MaticeEnum(): return StringToEnum("Matice")[0]
+def MatdamageiceEnum(): return StringToEnum("Matdamageice")[0]
+def MatparEnum(): return StringToEnum("Matpar")[0]
+def NodeEnum(): return StringToEnum("Node")[0]
+def NumericalfluxEnum(): return StringToEnum("Numericalflux")[0]
+def NumericalfluxTypeEnum(): return StringToEnum("NumericalfluxType")[0]
+def NeumannfluxEnum(): return StringToEnum("Neumannflux")[0]
+def ParamEnum(): return StringToEnum("Param")[0]
+def MoulinEnum(): return StringToEnum("Moulin")[0]
+def PengridEnum(): return StringToEnum("Pengrid")[0]
+def PenpairEnum(): return StringToEnum("Penpair")[0]
+def ProfilerEnum(): return StringToEnum("Profiler")[0]
+def MatrixParamEnum(): return StringToEnum("MatrixParam")[0]
+def MassconEnum(): return StringToEnum("Masscon")[0]
+def MassconNameEnum(): return StringToEnum("MassconName")[0]
+def MassconDefinitionenumEnum(): return StringToEnum("MassconDefinitionenum")[0]
+def MassconLevelsetEnum(): return StringToEnum("MassconLevelset")[0]
+def MassconaxpbyEnum(): return StringToEnum("Massconaxpby")[0]
+def MassconaxpbyNameEnum(): return StringToEnum("MassconaxpbyName")[0]
+def MassconaxpbyDefinitionenumEnum(): return StringToEnum("MassconaxpbyDefinitionenum")[0]
+def MassconaxpbyNamexEnum(): return StringToEnum("MassconaxpbyNamex")[0]
+def MassconaxpbyNameyEnum(): return StringToEnum("MassconaxpbyNamey")[0]
+def MassconaxpbyAlphaEnum(): return StringToEnum("MassconaxpbyAlpha")[0]
+def MassconaxpbyBetaEnum(): return StringToEnum("MassconaxpbyBeta")[0]
+def NodeSIdEnum(): return StringToEnum("NodeSId")[0]
+def VectorParamEnum(): return StringToEnum("VectorParam")[0]
+def RiftfrontEnum(): return StringToEnum("Riftfront")[0]
+def RiftfrontTypeEnum(): return StringToEnum("RiftfrontType")[0]
+def SegmentEnum(): return StringToEnum("Segment")[0]
+def SegmentRiftfrontEnum(): return StringToEnum("SegmentRiftfront")[0]
+def SpcDynamicEnum(): return StringToEnum("SpcDynamic")[0]
+def SpcStaticEnum(): return StringToEnum("SpcStatic")[0]
+def SpcTransientEnum(): return StringToEnum("SpcTransient")[0]
+def StringArrayParamEnum(): return StringToEnum("StringArrayParam")[0]
+def StringParamEnum(): return StringToEnum("StringParam")[0]
+def SegEnum(): return StringToEnum("Seg")[0]
+def SegInputEnum(): return StringToEnum("SegInput")[0]
+def TriaEnum(): return StringToEnum("Tria")[0]
+def TriaInputEnum(): return StringToEnum("TriaInput")[0]
+def TetraEnum(): return StringToEnum("Tetra")[0]
+def TetraInputEnum(): return StringToEnum("TetraInput")[0]
+def PentaEnum(): return StringToEnum("Penta")[0]
+def PentaInputEnum(): return StringToEnum("PentaInput")[0]
+def VertexEnum(): return StringToEnum("Vertex")[0]
+def VertexPIdEnum(): return StringToEnum("VertexPId")[0]
+def VertexSIdEnum(): return StringToEnum("VertexSId")[0]
+def AirEnum(): return StringToEnum("Air")[0]
+def IceEnum(): return StringToEnum("Ice")[0]
+def MelangeEnum(): return StringToEnum("Melange")[0]
+def WaterEnum(): return StringToEnum("Water")[0]
+def ClosedEnum(): return StringToEnum("Closed")[0]
+def FreeEnum(): return StringToEnum("Free")[0]
+def OpenEnum(): return StringToEnum("Open")[0]
+def AdjointpEnum(): return StringToEnum("Adjointp")[0]
+def AdjointxEnum(): return StringToEnum("Adjointx")[0]
+def AdjointyEnum(): return StringToEnum("Adjointy")[0]
+def AdjointzEnum(): return StringToEnum("Adjointz")[0]
+def BalancethicknessMisfitEnum(): return StringToEnum("BalancethicknessMisfit")[0]
+def BedSlopeXEnum(): return StringToEnum("BedSlopeX")[0]
+def BedSlopeYEnum(): return StringToEnum("BedSlopeY")[0]
+def BoundaryEnum(): return StringToEnum("Boundary")[0]
+def ConvergedEnum(): return StringToEnum("Converged")[0]
+def FillEnum(): return StringToEnum("Fill")[0]
+def FractionIncrementEnum(): return StringToEnum("FractionIncrement")[0]
+def FrictionEnum(): return StringToEnum("Friction")[0]
+def InternalEnum(): return StringToEnum("Internal")[0]
+def MassFluxEnum(): return StringToEnum("MassFlux")[0]
+def MeltingOffsetEnum(): return StringToEnum("MeltingOffset")[0]
+def MisfitEnum(): return StringToEnum("Misfit")[0]
+def PressureEnum(): return StringToEnum("Pressure")[0]
+def PressurePicardEnum(): return StringToEnum("PressurePicard")[0]
+def AndroidFrictionCoefficientEnum(): return StringToEnum("AndroidFrictionCoefficient")[0]
+def ResetPenaltiesEnum(): return StringToEnum("ResetPenalties")[0]
+def SegmentOnIceShelfEnum(): return StringToEnum("SegmentOnIceShelf")[0]
+def SurfaceAbsVelMisfitEnum(): return StringToEnum("SurfaceAbsVelMisfit")[0]
+def SurfaceAreaEnum(): return StringToEnum("SurfaceArea")[0]
+def SurfaceAverageVelMisfitEnum(): return StringToEnum("SurfaceAverageVelMisfit")[0]
+def SurfaceLogVelMisfitEnum(): return StringToEnum("SurfaceLogVelMisfit")[0]
+def SurfaceLogVxVyMisfitEnum(): return StringToEnum("SurfaceLogVxVyMisfit")[0]
+def SurfaceRelVelMisfitEnum(): return StringToEnum("SurfaceRelVelMisfit")[0]
+def SurfaceSlopeXEnum(): return StringToEnum("SurfaceSlopeX")[0]
+def SurfaceSlopeYEnum(): return StringToEnum("SurfaceSlopeY")[0]
+def TemperatureEnum(): return StringToEnum("Temperature")[0]
+def TemperaturePicardEnum(): return StringToEnum("TemperaturePicard")[0]
+def TemperaturePDDEnum(): return StringToEnum("TemperaturePDD")[0]
+def ThicknessAbsMisfitEnum(): return StringToEnum("ThicknessAbsMisfit")[0]
+def SurfaceAbsMisfitEnum(): return StringToEnum("SurfaceAbsMisfit")[0]
+def VelEnum(): return StringToEnum("Vel")[0]
+def VelocityEnum(): return StringToEnum("Velocity")[0]
+def VxAverageEnum(): return StringToEnum("VxAverage")[0]
+def VxEnum(): return StringToEnum("Vx")[0]
+def VxPicardEnum(): return StringToEnum("VxPicard")[0]
+def VyAverageEnum(): return StringToEnum("VyAverage")[0]
+def VyEnum(): return StringToEnum("Vy")[0]
+def VyPicardEnum(): return StringToEnum("VyPicard")[0]
+def VzEnum(): return StringToEnum("Vz")[0]
+def VzSSAEnum(): return StringToEnum("VzSSA")[0]
+def VzHOEnum(): return StringToEnum("VzHO")[0]
+def VzPicardEnum(): return StringToEnum("VzPicard")[0]
+def VzFSEnum(): return StringToEnum("VzFS")[0]
+def VxMeshEnum(): return StringToEnum("VxMesh")[0]
+def VyMeshEnum(): return StringToEnum("VyMesh")[0]
+def VzMeshEnum(): return StringToEnum("VzMesh")[0]
+def EnthalpyEnum(): return StringToEnum("Enthalpy")[0]
+def EnthalpyPicardEnum(): return StringToEnum("EnthalpyPicard")[0]
+def ThicknessAbsGradientEnum(): return StringToEnum("ThicknessAbsGradient")[0]
+def ThicknessAlongGradientEnum(): return StringToEnum("ThicknessAlongGradient")[0]
+def ThicknessAcrossGradientEnum(): return StringToEnum("ThicknessAcrossGradient")[0]
+def IntMatParamEnum(): return StringToEnum("IntMatParam")[0]
+def RheologyBbarAbsGradientEnum(): return StringToEnum("RheologyBbarAbsGradient")[0]
+def RheologyBAbsGradientEnum(): return StringToEnum("RheologyBAbsGradient")[0]
+def DragCoefficientAbsGradientEnum(): return StringToEnum("DragCoefficientAbsGradient")[0]
+def TransientInputEnum(): return StringToEnum("TransientInput")[0]
+def WaterfractionEnum(): return StringToEnum("Waterfraction")[0]
+def WatercolumnEnum(): return StringToEnum("Watercolumn")[0]
+def BasalFrictionEnum(): return StringToEnum("BasalFriction")[0]
+def ViscousHeatingEnum(): return StringToEnum("ViscousHeating")[0]
+def HydrologyWaterVxEnum(): return StringToEnum("HydrologyWaterVx")[0]
+def HydrologyWaterVyEnum(): return StringToEnum("HydrologyWaterVy")[0]
+def DrivingStressXEnum(): return StringToEnum("DrivingStressX")[0]
+def DrivingStressYEnum(): return StringToEnum("DrivingStressY")[0]
+def SigmaNNEnum(): return StringToEnum("SigmaNN")[0]
+def StressTensorEnum(): return StringToEnum("StressTensor")[0]
+def StressTensorxxEnum(): return StringToEnum("StressTensorxx")[0]
+def StressTensorxyEnum(): return StringToEnum("StressTensorxy")[0]
+def StressTensorxzEnum(): return StringToEnum("StressTensorxz")[0]
+def StressTensoryyEnum(): return StringToEnum("StressTensoryy")[0]
+def StressTensoryzEnum(): return StringToEnum("StressTensoryz")[0]
+def StressTensorzzEnum(): return StringToEnum("StressTensorzz")[0]
+def StressMaxPrincipalEnum(): return StringToEnum("StressMaxPrincipal")[0]
+def DeviatoricStressEnum(): return StringToEnum("DeviatoricStress")[0]
+def DeviatoricStressxxEnum(): return StringToEnum("DeviatoricStressxx")[0]
+def DeviatoricStressxyEnum(): return StringToEnum("DeviatoricStressxy")[0]
+def DeviatoricStressxzEnum(): return StringToEnum("DeviatoricStressxz")[0]
+def DeviatoricStressyyEnum(): return StringToEnum("DeviatoricStressyy")[0]
+def DeviatoricStressyzEnum(): return StringToEnum("DeviatoricStressyz")[0]
+def DeviatoricStresszzEnum(): return StringToEnum("DeviatoricStresszz")[0]
+def DeviatoricStresseffectiveEnum(): return StringToEnum("DeviatoricStresseffective")[0]
+def StrainRateEnum(): return StringToEnum("StrainRate")[0]
+def StrainRatexxEnum(): return StringToEnum("StrainRatexx")[0]
+def StrainRatexyEnum(): return StringToEnum("StrainRatexy")[0]
+def StrainRatexzEnum(): return StringToEnum("StrainRatexz")[0]
+def StrainRateyyEnum(): return StringToEnum("StrainRateyy")[0]
+def StrainRateyzEnum(): return StringToEnum("StrainRateyz")[0]
+def StrainRatezzEnum(): return StringToEnum("StrainRatezz")[0]
+def DivergenceEnum(): return StringToEnum("Divergence")[0]
+def MaxDivergenceEnum(): return StringToEnum("MaxDivergence")[0]
+def GiaCrossSectionShapeEnum(): return StringToEnum("GiaCrossSectionShape")[0]
+def GiadWdtEnum(): return StringToEnum("GiadWdt")[0]
+def GiaWEnum(): return StringToEnum("GiaW")[0]
+def P0Enum(): return StringToEnum("P0")[0]
+def P0ArrayEnum(): return StringToEnum("P0Array")[0]
+def P1Enum(): return StringToEnum("P1")[0]
+def P1DGEnum(): return StringToEnum("P1DG")[0]
+def P1bubbleEnum(): return StringToEnum("P1bubble")[0]
+def P1bubblecondensedEnum(): return StringToEnum("P1bubblecondensed")[0]
+def P2Enum(): return StringToEnum("P2")[0]
+def P2bubbleEnum(): return StringToEnum("P2bubble")[0]
+def P2bubblecondensedEnum(): return StringToEnum("P2bubblecondensed")[0]
+def P2xP1Enum(): return StringToEnum("P2xP1")[0]
+def P1xP2Enum(): return StringToEnum("P1xP2")[0]
+def P1xP3Enum(): return StringToEnum("P1xP3")[0]
+def P2xP4Enum(): return StringToEnum("P2xP4")[0]
+def P1P1Enum(): return StringToEnum("P1P1")[0]
+def P1P1GLSEnum(): return StringToEnum("P1P1GLS")[0]
+def MINIEnum(): return StringToEnum("MINI")[0]
+def MINIcondensedEnum(): return StringToEnum("MINIcondensed")[0]
+def TaylorHoodEnum(): return StringToEnum("TaylorHood")[0]
+def LATaylorHoodEnum(): return StringToEnum("LATaylorHood")[0]
+def XTaylorHoodEnum(): return StringToEnum("XTaylorHood")[0]
+def OneLayerP4zEnum(): return StringToEnum("OneLayerP4z")[0]
+def CrouzeixRaviartEnum(): return StringToEnum("CrouzeixRaviart")[0]
+def LACrouzeixRaviartEnum(): return StringToEnum("LACrouzeixRaviart")[0]
+def SaveResultsEnum(): return StringToEnum("SaveResults")[0]
+def BoolExternalResultEnum(): return StringToEnum("BoolExternalResult")[0]
+def DoubleExternalResultEnum(): return StringToEnum("DoubleExternalResult")[0]
+def DoubleMatExternalResultEnum(): return StringToEnum("DoubleMatExternalResult")[0]
+def IntExternalResultEnum(): return StringToEnum("IntExternalResult")[0]
+def JEnum(): return StringToEnum("J")[0]
+def StringExternalResultEnum(): return StringToEnum("StringExternalResult")[0]
+def StepEnum(): return StringToEnum("Step")[0]
+def TimeEnum(): return StringToEnum("Time")[0]
+def WaterColumnOldEnum(): return StringToEnum("WaterColumnOld")[0]
+def OutputdefinitionEnum(): return StringToEnum("Outputdefinition")[0]
+def Outputdefinition1Enum(): return StringToEnum("Outputdefinition1")[0]
+def Outputdefinition2Enum(): return StringToEnum("Outputdefinition2")[0]
+def Outputdefinition3Enum(): return StringToEnum("Outputdefinition3")[0]
+def Outputdefinition4Enum(): return StringToEnum("Outputdefinition4")[0]
+def Outputdefinition5Enum(): return StringToEnum("Outputdefinition5")[0]
+def Outputdefinition6Enum(): return StringToEnum("Outputdefinition6")[0]
+def Outputdefinition7Enum(): return StringToEnum("Outputdefinition7")[0]
+def Outputdefinition8Enum(): return StringToEnum("Outputdefinition8")[0]
+def Outputdefinition9Enum(): return StringToEnum("Outputdefinition9")[0]
+def Outputdefinition10Enum(): return StringToEnum("Outputdefinition10")[0]
+def Outputdefinition11Enum(): return StringToEnum("Outputdefinition11")[0]
+def Outputdefinition12Enum(): return StringToEnum("Outputdefinition12")[0]
+def Outputdefinition13Enum(): return StringToEnum("Outputdefinition13")[0]
+def Outputdefinition14Enum(): return StringToEnum("Outputdefinition14")[0]
+def Outputdefinition15Enum(): return StringToEnum("Outputdefinition15")[0]
+def Outputdefinition16Enum(): return StringToEnum("Outputdefinition16")[0]
+def Outputdefinition17Enum(): return StringToEnum("Outputdefinition17")[0]
+def Outputdefinition18Enum(): return StringToEnum("Outputdefinition18")[0]
+def Outputdefinition19Enum(): return StringToEnum("Outputdefinition19")[0]
+def Outputdefinition20Enum(): return StringToEnum("Outputdefinition20")[0]
+def Outputdefinition21Enum(): return StringToEnum("Outputdefinition21")[0]
+def Outputdefinition22Enum(): return StringToEnum("Outputdefinition22")[0]
+def Outputdefinition23Enum(): return StringToEnum("Outputdefinition23")[0]
+def Outputdefinition24Enum(): return StringToEnum("Outputdefinition24")[0]
+def Outputdefinition25Enum(): return StringToEnum("Outputdefinition25")[0]
+def Outputdefinition26Enum(): return StringToEnum("Outputdefinition26")[0]
+def Outputdefinition27Enum(): return StringToEnum("Outputdefinition27")[0]
+def Outputdefinition28Enum(): return StringToEnum("Outputdefinition28")[0]
+def Outputdefinition29Enum(): return StringToEnum("Outputdefinition29")[0]
+def Outputdefinition30Enum(): return StringToEnum("Outputdefinition30")[0]
+def Outputdefinition31Enum(): return StringToEnum("Outputdefinition31")[0]
+def Outputdefinition32Enum(): return StringToEnum("Outputdefinition32")[0]
+def Outputdefinition33Enum(): return StringToEnum("Outputdefinition33")[0]
+def Outputdefinition34Enum(): return StringToEnum("Outputdefinition34")[0]
+def Outputdefinition35Enum(): return StringToEnum("Outputdefinition35")[0]
+def Outputdefinition36Enum(): return StringToEnum("Outputdefinition36")[0]
+def Outputdefinition37Enum(): return StringToEnum("Outputdefinition37")[0]
+def Outputdefinition38Enum(): return StringToEnum("Outputdefinition38")[0]
+def Outputdefinition39Enum(): return StringToEnum("Outputdefinition39")[0]
+def Outputdefinition40Enum(): return StringToEnum("Outputdefinition40")[0]
+def Outputdefinition41Enum(): return StringToEnum("Outputdefinition41")[0]
+def Outputdefinition42Enum(): return StringToEnum("Outputdefinition42")[0]
+def Outputdefinition43Enum(): return StringToEnum("Outputdefinition43")[0]
+def Outputdefinition44Enum(): return StringToEnum("Outputdefinition44")[0]
+def Outputdefinition45Enum(): return StringToEnum("Outputdefinition45")[0]
+def Outputdefinition46Enum(): return StringToEnum("Outputdefinition46")[0]
+def Outputdefinition47Enum(): return StringToEnum("Outputdefinition47")[0]
+def Outputdefinition48Enum(): return StringToEnum("Outputdefinition48")[0]
+def Outputdefinition49Enum(): return StringToEnum("Outputdefinition49")[0]
+def Outputdefinition50Enum(): return StringToEnum("Outputdefinition50")[0]
+def Outputdefinition51Enum(): return StringToEnum("Outputdefinition51")[0]
+def Outputdefinition52Enum(): return StringToEnum("Outputdefinition52")[0]
+def Outputdefinition53Enum(): return StringToEnum("Outputdefinition53")[0]
+def Outputdefinition54Enum(): return StringToEnum("Outputdefinition54")[0]
+def Outputdefinition55Enum(): return StringToEnum("Outputdefinition55")[0]
+def Outputdefinition56Enum(): return StringToEnum("Outputdefinition56")[0]
+def Outputdefinition57Enum(): return StringToEnum("Outputdefinition57")[0]
+def Outputdefinition58Enum(): return StringToEnum("Outputdefinition58")[0]
+def Outputdefinition59Enum(): return StringToEnum("Outputdefinition59")[0]
+def Outputdefinition60Enum(): return StringToEnum("Outputdefinition60")[0]
+def Outputdefinition61Enum(): return StringToEnum("Outputdefinition61")[0]
+def Outputdefinition62Enum(): return StringToEnum("Outputdefinition62")[0]
+def Outputdefinition63Enum(): return StringToEnum("Outputdefinition63")[0]
+def Outputdefinition64Enum(): return StringToEnum("Outputdefinition64")[0]
+def Outputdefinition65Enum(): return StringToEnum("Outputdefinition65")[0]
+def Outputdefinition66Enum(): return StringToEnum("Outputdefinition66")[0]
+def Outputdefinition67Enum(): return StringToEnum("Outputdefinition67")[0]
+def Outputdefinition68Enum(): return StringToEnum("Outputdefinition68")[0]
+def Outputdefinition69Enum(): return StringToEnum("Outputdefinition69")[0]
+def Outputdefinition70Enum(): return StringToEnum("Outputdefinition70")[0]
+def Outputdefinition71Enum(): return StringToEnum("Outputdefinition71")[0]
+def Outputdefinition72Enum(): return StringToEnum("Outputdefinition72")[0]
+def Outputdefinition73Enum(): return StringToEnum("Outputdefinition73")[0]
+def Outputdefinition74Enum(): return StringToEnum("Outputdefinition74")[0]
+def Outputdefinition75Enum(): return StringToEnum("Outputdefinition75")[0]
+def Outputdefinition76Enum(): return StringToEnum("Outputdefinition76")[0]
+def Outputdefinition77Enum(): return StringToEnum("Outputdefinition77")[0]
+def Outputdefinition78Enum(): return StringToEnum("Outputdefinition78")[0]
+def Outputdefinition79Enum(): return StringToEnum("Outputdefinition79")[0]
+def Outputdefinition80Enum(): return StringToEnum("Outputdefinition80")[0]
+def Outputdefinition81Enum(): return StringToEnum("Outputdefinition81")[0]
+def Outputdefinition82Enum(): return StringToEnum("Outputdefinition82")[0]
+def Outputdefinition83Enum(): return StringToEnum("Outputdefinition83")[0]
+def Outputdefinition84Enum(): return StringToEnum("Outputdefinition84")[0]
+def Outputdefinition85Enum(): return StringToEnum("Outputdefinition85")[0]
+def Outputdefinition86Enum(): return StringToEnum("Outputdefinition86")[0]
+def Outputdefinition87Enum(): return StringToEnum("Outputdefinition87")[0]
+def Outputdefinition88Enum(): return StringToEnum("Outputdefinition88")[0]
+def Outputdefinition89Enum(): return StringToEnum("Outputdefinition89")[0]
+def Outputdefinition90Enum(): return StringToEnum("Outputdefinition90")[0]
+def Outputdefinition91Enum(): return StringToEnum("Outputdefinition91")[0]
+def Outputdefinition92Enum(): return StringToEnum("Outputdefinition92")[0]
+def Outputdefinition93Enum(): return StringToEnum("Outputdefinition93")[0]
+def Outputdefinition94Enum(): return StringToEnum("Outputdefinition94")[0]
+def Outputdefinition95Enum(): return StringToEnum("Outputdefinition95")[0]
+def Outputdefinition96Enum(): return StringToEnum("Outputdefinition96")[0]
+def Outputdefinition97Enum(): return StringToEnum("Outputdefinition97")[0]
+def Outputdefinition98Enum(): return StringToEnum("Outputdefinition98")[0]
+def Outputdefinition99Enum(): return StringToEnum("Outputdefinition99")[0]
+def Outputdefinition100Enum(): return StringToEnum("Outputdefinition100")[0]
+def OutputdefinitionListEnum(): return StringToEnum("OutputdefinitionList")[0]
+def MassfluxatgateEnum(): return StringToEnum("Massfluxatgate")[0]
+def MassfluxatgateNameEnum(): return StringToEnum("MassfluxatgateName")[0]
+def MassfluxatgateDefinitionenumEnum(): return StringToEnum("MassfluxatgateDefinitionenum")[0]
+def MassfluxatgateSegmentsEnum(): return StringToEnum("MassfluxatgateSegments")[0]
+def MisfitNameEnum(): return StringToEnum("MisfitName")[0]
+def MisfitDefinitionenumEnum(): return StringToEnum("MisfitDefinitionenum")[0]
+def MisfitModelEnumEnum(): return StringToEnum("MisfitModelEnum")[0]
+def MisfitObservationEnum(): return StringToEnum("MisfitObservation")[0]
+def MisfitObservationEnumEnum(): return StringToEnum("MisfitObservationEnum")[0]
+def MisfitLocalEnum(): return StringToEnum("MisfitLocal")[0]
+def MisfitTimeinterpolationEnum(): return StringToEnum("MisfitTimeinterpolation")[0]
+def MisfitWeightsEnum(): return StringToEnum("MisfitWeights")[0]
+def MisfitWeightsEnumEnum(): return StringToEnum("MisfitWeightsEnum")[0]
+def SurfaceObservationEnum(): return StringToEnum("SurfaceObservation")[0]
+def WeightsSurfaceObservationEnum(): return StringToEnum("WeightsSurfaceObservation")[0]
+def VxObsEnum(): return StringToEnum("VxObs")[0]
+def WeightsVxObsEnum(): return StringToEnum("WeightsVxObs")[0]
+def VyObsEnum(): return StringToEnum("VyObs")[0]
+def WeightsVyObsEnum(): return StringToEnum("WeightsVyObs")[0]
+def MinVelEnum(): return StringToEnum("MinVel")[0]
+def MaxVelEnum(): return StringToEnum("MaxVel")[0]
+def MinVxEnum(): return StringToEnum("MinVx")[0]
+def MaxVxEnum(): return StringToEnum("MaxVx")[0]
+def MaxAbsVxEnum(): return StringToEnum("MaxAbsVx")[0]
+def MinVyEnum(): return StringToEnum("MinVy")[0]
+def MaxVyEnum(): return StringToEnum("MaxVy")[0]
+def MaxAbsVyEnum(): return StringToEnum("MaxAbsVy")[0]
+def MinVzEnum(): return StringToEnum("MinVz")[0]
+def MaxVzEnum(): return StringToEnum("MaxVz")[0]
+def MaxAbsVzEnum(): return StringToEnum("MaxAbsVz")[0]
+def FloatingAreaEnum(): return StringToEnum("FloatingArea")[0]
+def GroundedAreaEnum(): return StringToEnum("GroundedArea")[0]
+def IceMassEnum(): return StringToEnum("IceMass")[0]
+def IceVolumeEnum(): return StringToEnum("IceVolume")[0]
+def IceVolumeAboveFloatationEnum(): return StringToEnum("IceVolumeAboveFloatation")[0]
+def TotalSmbEnum(): return StringToEnum("TotalSmb")[0]
+def AbsoluteEnum(): return StringToEnum("Absolute")[0]
+def IncrementalEnum(): return StringToEnum("Incremental")[0]
+def AugmentedLagrangianREnum(): return StringToEnum("AugmentedLagrangianR")[0]
+def AugmentedLagrangianRhopEnum(): return StringToEnum("AugmentedLagrangianRhop")[0]
+def AugmentedLagrangianRlambdaEnum(): return StringToEnum("AugmentedLagrangianRlambda")[0]
+def AugmentedLagrangianRholambdaEnum(): return StringToEnum("AugmentedLagrangianRholambda")[0]
+def AugmentedLagrangianThetaEnum(): return StringToEnum("AugmentedLagrangianTheta")[0]
+def NoneEnum(): return StringToEnum("None")[0]
+def AggressiveMigrationEnum(): return StringToEnum("AggressiveMigration")[0]
+def SoftMigrationEnum(): return StringToEnum("SoftMigration")[0]
+def SubelementMigrationEnum(): return StringToEnum("SubelementMigration")[0]
+def SubelementMigration2Enum(): return StringToEnum("SubelementMigration2")[0]
+def ContactEnum(): return StringToEnum("Contact")[0]
+def GroundingOnlyEnum(): return StringToEnum("GroundingOnly")[0]
+def MaskGroundediceLevelsetEnum(): return StringToEnum("MaskGroundediceLevelset")[0]
+def GaussSegEnum(): return StringToEnum("GaussSeg")[0]
+def GaussTriaEnum(): return StringToEnum("GaussTria")[0]
+def GaussTetraEnum(): return StringToEnum("GaussTetra")[0]
+def GaussPentaEnum(): return StringToEnum("GaussPenta")[0]
+def FSSolverEnum(): return StringToEnum("FSSolver")[0]
+def AdjointEnum(): return StringToEnum("Adjoint")[0]
+def ColinearEnum(): return StringToEnum("Colinear")[0]
+def ControlSteadyEnum(): return StringToEnum("ControlSteady")[0]
+def FsetEnum(): return StringToEnum("Fset")[0]
+def Gradient1Enum(): return StringToEnum("Gradient1")[0]
+def Gradient2Enum(): return StringToEnum("Gradient2")[0]
+def Gradient3Enum(): return StringToEnum("Gradient3")[0]
+def GradientEnum(): return StringToEnum("Gradient")[0]
+def GroundinglineMigrationEnum(): return StringToEnum("GroundinglineMigration")[0]
+def GsetEnum(): return StringToEnum("Gset")[0]
+def IndexEnum(): return StringToEnum("Index")[0]
+def IndexedEnum(): return StringToEnum("Indexed")[0]
+def IntersectEnum(): return StringToEnum("Intersect")[0]
+def NodalEnum(): return StringToEnum("Nodal")[0]
+def OldGradientEnum(): return StringToEnum("OldGradient")[0]
+def OutputBufferPointerEnum(): return StringToEnum("OutputBufferPointer")[0]
+def OutputBufferSizePointerEnum(): return StringToEnum("OutputBufferSizePointer")[0]
+def OutputFilePointerEnum(): return StringToEnum("OutputFilePointer")[0]
+def ToolkitsFileNameEnum(): return StringToEnum("ToolkitsFileName")[0]
+def RootPathEnum(): return StringToEnum("RootPath")[0]
+def OutputFileNameEnum(): return StringToEnum("OutputFileName")[0]
+def InputFileNameEnum(): return StringToEnum("InputFileName")[0]
+def LockFileNameEnum(): return StringToEnum("LockFileName")[0]
+def RestartFileNameEnum(): return StringToEnum("RestartFileName")[0]
+def ToolkitsOptionsAnalysesEnum(): return StringToEnum("ToolkitsOptionsAnalyses")[0]
+def ToolkitsOptionsStringsEnum(): return StringToEnum("ToolkitsOptionsStrings")[0]
+def QmuErrNameEnum(): return StringToEnum("QmuErrName")[0]
+def QmuInNameEnum(): return StringToEnum("QmuInName")[0]
+def QmuOutNameEnum(): return StringToEnum("QmuOutName")[0]
+def RegularEnum(): return StringToEnum("Regular")[0]
+def ScaledEnum(): return StringToEnum("Scaled")[0]
+def SeparateEnum(): return StringToEnum("Separate")[0]
+def SsetEnum(): return StringToEnum("Sset")[0]
+def VerboseEnum(): return StringToEnum("Verbose")[0]
+def TriangleInterpEnum(): return StringToEnum("TriangleInterp")[0]
+def BilinearInterpEnum(): return StringToEnum("BilinearInterp")[0]
+def NearestInterpEnum(): return StringToEnum("NearestInterp")[0]
+def XYEnum(): return StringToEnum("XY")[0]
+def XYZEnum(): return StringToEnum("XYZ")[0]
+def DenseEnum(): return StringToEnum("Dense")[0]
+def MpiDenseEnum(): return StringToEnum("MpiDense")[0]
+def MpiSparseEnum(): return StringToEnum("MpiSparse")[0]
+def SeqEnum(): return StringToEnum("Seq")[0]
+def MpiEnum(): return StringToEnum("Mpi")[0]
+def MumpsEnum(): return StringToEnum("Mumps")[0]
+def GslEnum(): return StringToEnum("Gsl")[0]
+def OptionEnum(): return StringToEnum("Option")[0]
+def GenericOptionEnum(): return StringToEnum("GenericOption")[0]
+def OptionCellEnum(): return StringToEnum("OptionCell")[0]
+def OptionStructEnum(): return StringToEnum("OptionStruct")[0]
+def CuffeyEnum(): return StringToEnum("Cuffey")[0]
+def PatersonEnum(): return StringToEnum("Paterson")[0]
+def ArrheniusEnum(): return StringToEnum("Arrhenius")[0]
+def LliboutryDuvalEnum(): return StringToEnum("LliboutryDuval")[0]
+def TransientIslevelsetEnum(): return StringToEnum("TransientIslevelset")[0]
+def SpcLevelsetEnum(): return StringToEnum("SpcLevelset")[0]
+def ExtrapolationVariableEnum(): return StringToEnum("ExtrapolationVariable")[0]
+def IceMaskNodeActivationEnum(): return StringToEnum("IceMaskNodeActivation")[0]
+def LevelsetfunctionSlopeXEnum(): return StringToEnum("LevelsetfunctionSlopeX")[0]
+def LevelsetfunctionSlopeYEnum(): return StringToEnum("LevelsetfunctionSlopeY")[0]
+def LevelsetfunctionPicardEnum(): return StringToEnum("LevelsetfunctionPicard")[0]
+def MaximumNumberOfDefinitionsEnum(): return StringToEnum("MaximumNumberOfDefinitions")[0]
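Note on the file above: every generated wrapper resolves an enum name through StringToEnum at call time, so Python code can refer to ISSM enums by name without hard-coding integer ids. A minimal sketch of the mechanism, assuming a tiny stand-in lookup table (the table and its integer values are hypothetical placeholders; the real mapping comes from the compiled StringToEnum module):

    # Hypothetical stand-in for ISSM's StringToEnum lookup; values are placeholders.
    _registry = {"StressbalanceSolution": 0, "ThermalSolution": 1}

    def StringToEnum(name):
        # Return a (value, name) pair to mirror the [0] indexing used by the wrappers.
        return (_registry[name], name)

    def StressbalanceSolutionEnum(): return StringToEnum("StressbalanceSolution")[0]

    assert StressbalanceSolutionEnum() == 0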
Index: ../trunk-jpl/src/py3/io/savevars.py
===================================================================
--- ../trunk-jpl/src/py3/io/savevars.py	(revision 0)
+++ ../trunk-jpl/src/py3/io/savevars.py	(revision 19895)
@@ -0,0 +1,62 @@
+import shelve
+import os.path
+
+def savevars(*args):
+    """
+    SAVEVARS - function to save variables to a file.
+
+    This function saves one or more variables to a file. The names of the variables
+    must be supplied. If more than one variable is specified, it may be done with
+    lists of names and values or a dictionary of name:value pairs. All the variables
+    in the workspace may be saved by specifying the globals() dictionary, but this
+    may include a lot of extraneous data.
+
+    Usage:
+    savevars('shelve.dat','a',a)
+    savevars('shelve.dat',['a','b'],[a,b])
+    savevars('shelve.dat',{'a':a,'b':b})
+    savevars('shelve.dat',globals())
+
+    """
+
+    filename=''
+    nvdict={}
+
+    if len(args) >= 1 and isinstance(args[0],str):
+        filename=args[0]
+        if not filename:
+            filename='/tmp/shelve.dat'
+
+    else:
+        raise TypeError("Missing file name.")
+
+    if len(args) >= 3 and isinstance(args[1],str):    # (filename,name,value)
+        for i in range(1,len(args),2):
+            nvdict[args[i]]=args[i+1]
+
+    elif len(args) == 3 and isinstance(args[1],list) and isinstance(args[2],list):    # (filename,[names],[values])
+        for name,value in zip(args[1],args[2]):
+            nvdict[name]=value
+
+    elif len(args) == 2 and isinstance(args[1],dict):    # (filename,{names:values})
+        nvdict=args[1]
+
+    else:
+        raise TypeError("Unrecognized input arguments.")
+
+    if os.path.exists(filename):
+        print("Shelving variables to existing file '%s'." % filename)
+    else:
+        print("Shelving variables to new file '%s'." % filename)
+
+    my_shelf = shelve.open(filename,'c')    # 'c' for create if not exist, else 'n' for new
+
+    for name,value in nvdict.items():
+        try:
+            my_shelf[name] = value
+            print("Variable '%s' shelved." % name)
+        except TypeError:
+            print("Variable '%s' not shelved." % name)
+
+    my_shelf.close()
+
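The three calling conventions documented in the docstring, shown concretely (throwaway variables; '/tmp/shelve.dat' is the module's own fallback path):

    a=1; b=[2,3]
    savevars('/tmp/shelve.dat','a',a)              # name/value pairs
    savevars('/tmp/shelve.dat',['a','b'],[a,b])    # parallel lists of names and values
    savevars('/tmp/shelve.dat',{'a':a,'b':b})      # explicit name:value dictionary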
Index: ../trunk-jpl/src/py3/io/loadvars.py
===================================================================
--- ../trunk-jpl/src/py3/io/loadvars.py	(revision 0)
+++ ../trunk-jpl/src/py3/io/loadvars.py	(revision 19895)
@@ -0,0 +1,84 @@
+import shelve
+import os.path
+from dbm import whichdb
+
+def loadvars(*args):
+    """
+    LOADVARS - function to load variables from a file.
+
+    This function loads one or more variables from a file. The names of the variables
+    must be supplied. If more than one variable is specified, it may be done with
+    a list of names or a dictionary with names as keys. The output type corresponds
+    to the input type. All the variables in the file may be loaded by specifying only
+    the file name.
+
+    Usage:
+    a=loadvars('shelve.dat','a')
+    [a,b]=loadvars('shelve.dat',['a','b'])
+    nvdict=loadvars('shelve.dat',{'a':None,'b':None})
+    nvdict=loadvars('shelve.dat')
+
+    """
+
+    filename=''
+    nvdict={}
+
+    if len(args) >= 1 and isinstance(args[0],str):
+        filename=args[0]
+        if not filename:
+            filename='/tmp/shelve.dat'
+
+    else:
+        raise TypeError("Missing file name.")
+
+    if len(args) >= 2 and isinstance(args[1],str):    # (filename,name)
+        for name in args[1:]:
+            nvdict[name]=None
+
+    elif len(args) == 2 and isinstance(args[1],list):    # (filename,[names])
+        for name in args[1]:
+            nvdict[name]=None
+
+    elif len(args) == 2 and isinstance(args[1],dict):    # (filename,{names:values})
+        nvdict=args[1]
+
+    elif len(args) == 1:    # (filename)
+        pass
+
+    else:
+        raise TypeError("Unrecognized input arguments.")
+
+    if whichdb(filename):
+        print("Loading variables from file '%s'." % filename)
+    else:
+        raise IOError("File '%s' not found." % filename)
+
+    my_shelf = shelve.open(filename,'r')    # 'r' for read-only
+
+    if nvdict:
+        for name in nvdict.keys():
+            try:
+                nvdict[name] = my_shelf[name]
+                print("Variable '%s' loaded." % name)
+            except KeyError:
+                nvdict[name] = None    # leave a placeholder for the missing variable
+                print("Variable '%s' not found." % name)
+
+    else:
+        for name in my_shelf.keys():
+            nvdict[name] = my_shelf[name]
+            print("Variable '%s' loaded." % name)
+
+    my_shelf.close()
+
+    if len(args) >= 2 and isinstance(args[1],str):    # (value)
+        value=[nvdict[name] for name in args[1:]]
+        if len(value) == 1:    # single name: return the bare value, as documented above
+            return value[0]
+        return value
+
+    elif len(args) == 2 and isinstance(args[1],list):    # ([values])
+        value=[nvdict[name] for name in args[1]]
+        return value
+
+    elif (len(args) == 2 and isinstance(args[1],dict)) or (len(args) == 1):    # ({names:values})
+        return nvdict
+
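As documented above, the output shape follows the lookup form; a quick illustration against the shelve written in the savevars example (same throwaway file):

    a = loadvars('/tmp/shelve.dat','a')            # single name: bare value
    a,b = loadvars('/tmp/shelve.dat',['a','b'])    # list of names: list of values
    nvdict = loadvars('/tmp/shelve.dat')           # no names: whole file as a name:value dict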
Index: ../trunk-jpl/src/py3/io/loadmodel.py
===================================================================
--- ../trunk-jpl/src/py3/io/loadmodel.py	(revision 0)
+++ ../trunk-jpl/src/py3/io/loadmodel.py	(revision 19895)
@@ -0,0 +1,35 @@
+from loadvars import loadvars
+from dbm import whichdb
+
+def loadmodel(path):
+    """
+    LOADMODEL - load a model using the built-in load module
+
+    Check that the model prototype has not changed; if so, adapt to the new prototype.
+
+    Usage:
+    md=loadmodel(path)
+    """
+
+    #check existence of database (independent of file extension!)
+    if whichdb(path):
+        #do nothing
+        pass
+    else:
+        raise IOError("loadmodel error message: file '%s' does not exist" % path)
+
+    try:
+        #recover model on file and name it md
+        struc=loadvars(path)
+
+        name=[key for key in struc.keys()]
+        if len(name)>1:
+            raise IOError("loadmodel error message: file '%s' contains several variables. Only one model should be present." % path)
+
+        md=struc[name[0]]
+        return md
+
+    except Exception as me:
+        print(me)
+        raise IOError("could not load model '%s'" % path)
+
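Usage note: loadmodel delegates to loadvars, so the shelve file must contain exactly one variable, the model itself. A sketch of the intended round trip (the file name is illustrative):

    savevars('Greenland.python.md','md',md)    # shelve exactly one variable
    md = loadmodel('Greenland.python.md')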
Index: ../trunk-jpl/src/py3/modifier.sh
===================================================================
--- ../trunk-jpl/src/py3/modifier.sh	(revision 0)
+++ ../trunk-jpl/src/py3/modifier.sh	(revision 19895)
@@ -0,0 +1,419 @@
+sed -i 's/(md,'fieldname','friction.coefficient','timeseries',1,'NaN',1)/(md,fieldname='friction.coefficient',timeseries=1,NaN=1)/g' ./classes/frictioncoulomb.py
+sed -i 's/(md,'fieldname','friction.coefficientcoulomb','timeseries',1,'NaN',1)/(md,fieldname='friction.coefficientcoulomb',timeseries=1,NaN=1)/g' ./classes/frictioncoulomb.py
+sed -i 's/(md,'fieldname','friction.q','NaN',1,'size',[md.mesh.numberofelements])/(md,fieldname='friction.q',NaN=1,size=[md.mesh.numberofelements])/g' ./classes/frictioncoulomb.py
+sed -i 's/(md,'fieldname','friction.p','NaN',1,'size',[md.mesh.numberofelements])/(md,fieldname='friction.p',NaN=1,size=[md.mesh.numberofelements])/g' ./classes/frictioncoulomb.py
+sed -i 's/(md,'fieldname','autodiff.obufsize','>=',524288)/(md,fieldname='autodiff.obufsize',ge=524288)/g' ./classes/autodiff.py
+sed -i 's/(md,'fieldname','autodiff.lbufsize','>=',524288)/(md,fieldname='autodiff.lbufsize',ge=524288)/g' ./classes/autodiff.py
+sed -i 's/(md,'fieldname','autodiff.cbufsize','>=',524288)/(md,fieldname='autodiff.cbufsize',ge=524288)/g' ./classes/autodiff.py
+sed -i 's/(md,'fieldname','autodiff.tbufsize','>=',524288)/(md,fieldname='autodiff.tbufsize',ge=524288)/g' ./classes/autodiff.py
+sed -i 's/(md,'fieldname','autodiff.gcTriggerRatio','>=',2.0)/(md,fieldname='autodiff.gcTriggerRatio',ge=2.0)/g' ./classes/autodiff.py
+sed -i 's/(md,'fieldname','autodiff.gcTriggerMaxSize','>=',2000000)/(md,fieldname='autodiff.gcTriggerMaxSize',ge=2000000)/g' ./classes/autodiff.py
+sed -i 's/(md,'fieldname','autodiff.driver','values',['fos_forward','fov_forward','fov_forward_all','fos_reverse','fov_reverse','fov_reverse_all'])/(md,fieldname='autodiff.driver',values=['fos_forward','fov_forward','fov_forward_all','fos_reverse','fov_reverse','fov_reverse_all'])/g' ./classes/autodiff.py
+sed -i 's/(md,'fieldname','materials.rho_ice','>',0)/(md,fieldname='materials.rho_ice',gt=0)/g' ./classes/matice.py
+sed -i 's/(md,'fieldname','materials.rho_water','>',0)/(md,fieldname='materials.rho_water',gt=0)/g' ./classes/matice.py
+sed -i 's/(md,'fieldname','materials.rho_freshwater','>',0)/(md,fieldname='materials.rho_freshwater',gt=0)/g' ./classes/matice.py
+sed -i 's/(md,'fieldname','materials.mu_water','>',0)/(md,fieldname='materials.mu_water',gt=0)/g' ./classes/matice.py
+sed -i 's/(md,'fieldname','materials.rheology_B','>',0,'timeseries',1,'NaN',1)/(md,fieldname='materials.rheology_B',gt=0,timeseries=1,NaN=1)/g' ./classes/matice.py
+sed -i 's/(md,'fieldname','materials.rheology_n','>',0,'size',[md.mesh.numberofelements])/(md,fieldname='materials.rheology_n',gt=0,size=[md.mesh.numberofelements])/g' ./classes/matice.py
+sed -i 's/(md,'fieldname','materials.rheology_law','values',['None','Cuffey','Paterson','Arrhenius','LliboutryDuval'])/(md,fieldname='materials.rheology_law',values=['None','Cuffey','Paterson','Arrhenius','LliboutryDuval'])/g' ./classes/matice.py
+sed -i 's/(md,'fieldname','materials.lithosphere_shear_modulus','>',0,'numel',[1]);/(md,fieldname='materials.lithosphere_shear_modulus',gt=0,numel=[1]);/g' ./classes/matice.py
+sed -i 's/(md,'fieldname','materials.lithosphere_density','>',0,'numel',[1]);/(md,fieldname='materials.lithosphere_density',gt=0,numel=[1]);/g' ./classes/matice.py
+sed -i 's/(md,'fieldname','materials.mantle_shear_modulus','>',0,'numel',[1]);/(md,fieldname='materials.mantle_shear_modulus',gt=0,numel=[1]);/g' ./classes/matice.py
+sed -i 's/(md,'fieldname','materials.mantle_density','>',0,'numel',[1]);/(md,fieldname='materials.mantle_density',gt=0,numel=[1]);/g' ./classes/matice.py:
+sed -i 's/(md,'fieldname','smb.desfac','<=',1,'numel',[1])/(md,fieldname='smb.desfac',le=1,numel=[1])/g' ./classes/SMBd18opdd.py:
+sed -i 's/(md,'fieldname','smb.s0p','>=',0,'NaN',1,'size',[md.mesh.numberofvertices,1])/(md,fieldname='smb.s0p',ge=0,NaN=1,size=[md.mesh.numberofvertices,1])/g' ./classes/SMBd18opdd.py:
+sed -i 's/(md,'fieldname','smb.s0t','>=',0,'NaN',1,'size',[md.mesh.numberofvertices,1])/(md,fieldname='smb.s0t',ge=0,NaN=1,size=[md.mesh.numberofvertices,1])/g' ./classes/SMBd18opdd.py:
+sed -i 's/(md,'fieldname','smb.rlaps','>=',0,'numel',[1])/(md,fieldname='smb.rlaps',ge=0,numel=[1])/g' ./classes/SMBd18opdd.py:
+sed -i 's/(md,'fieldname','smb.rlapslgm','>=',0,'numel',[1])/(md,fieldname='smb.rlapslgm',ge=0,numel=[1])/g' ./classes/SMBd18opdd.py:
+sed -i 's/(md,'fieldname','smb.temperatures_presentday','size',[md.mesh.numberofvertices+1,12],'NaN',1,'timeseries',1)/(md,fieldname='smb.temperatures_presentday',size=[md.mesh.numberofvertices+1,12],NaN=1,timeseries=1)/g' ./classes/SMBd18opdd.py:
+sed -i 's/(md,'fieldname','smb.precipitations_presentday','size',[md.mesh.numberofvertices+1,12],'NaN',1,'timeseries',1)/(md,fieldname='smb.precipitations_presentday',size=[md.mesh.numberofvertices+1,12],NaN=1,timeseries=1)/g' ./classes/SMBd18opdd.py:
+sed -i 's/(md,'fieldname','smb.delta18o','NaN',1,'size',[2,numpy.nan],'singletimeseries',1)/(md,fieldname='smb.delta18o',NaN=1,size=[2,numpy.nan],singletimeseries=1)/g' ./classes/SMBd18opdd.py:
+sed -i 's/(md,'fieldname','smb.dpermil','>=',0,'numel',[1])/(md,fieldname='smb.dpermil',ge=0,numel=[1])/g' ./classes/SMBd18opdd.py:
+sed -i 's/(md,'fieldname','masstransport.requested_outputs','stringrow',1)/(md,fieldname='masstransport.requested_outputs','stringrow',1)/g' ./classes/SMBd18opdd.py:
+sed -i 's/(md,'fieldname','inversion.iscontrol','values',[0,1])/(md,fieldname='inversion.iscontrol',values=[0,1])/g' ./classes/m1qn3inversion.py:
+sed -i 's/(md,'fieldname','inversion.incomplete_adjoint','values',[0,1])/(md,fieldname='inversion.incomplete_adjoint',values=[0,1])/g' ./classes/m1qn3inversion.py:
+sed -i 's/(md,'fieldname','inversion.control_parameters','cell',1,'values',supportedcontrols())/(md,fieldname='inversion.control_parameters','cell',1,values=supportedcontrols())/g' ./classes/m1qn3inversion.py:
+sed -i 's/(md,'fieldname','inversion.control_scaling_factors','size',[num_controls],'>',0,'NaN',1)/(md,fieldname='inversion.control_scaling_factors',size=[num_controls],gt=0,NaN=1)/g' ./classes/m1qn3inversion.py:
+sed -i 's/(md,'fieldname','inversion.maxsteps','numel',[1],'>=',0)/(md,fieldname='inversion.maxsteps',numel=[1],ge=0)/g' ./classes/m1qn3inversion.py:
+sed -i 's/(md,'fieldname','inversion.maxiter','numel',[1],'>=',0)/(md,fieldname='inversion.maxiter',numel=[1],ge=0)/g' ./classes/m1qn3inversion.py:
+sed -i 's/(md,'fieldname','inversion.dxmin','numel',[1],'>',0.)/(md,fieldname='inversion.dxmin',numel=[1],gt=0.)/g' ./classes/m1qn3inversion.py:
+sed -i 's/(md,'fieldname','inversion.gttol','numel',[1],'>',0.)/(md,fieldname='inversion.gttol',numel=[1],gt=0.)/g' ./classes/m1qn3inversion.py:
+sed -i 's/(md,'fieldname','inversion.cost_functions','size',[num_costfunc],'values',supportedcostfunctions())/(md,fieldname='inversion.cost_functions',size=[num_costfunc],values=supportedcostfunctions())/g' ./classes/m1qn3inversion.py:
+sed -i 's/(md,'fieldname','inversion.cost_functions_coefficients','size',[md.mesh.numberofvertices,num_costfunc],'>=',0)/(md,fieldname='inversion.cost_functions_coefficients',size=[md.mesh.numberofvertices,num_costfunc],ge=0)/g' ./classes/m1qn3inversion.py:
+sed -i 's/(md,'fieldname','inversion.min_parameters','size',[md.mesh.numberofvertices,num_controls])/(md,fieldname='inversion.min_parameters',size=[md.mesh.numberofvertices,num_controls])/g' ./classes/m1qn3inversion.py:
+sed -i 's/(md,'fieldname','inversion.max_parameters','size',[md.mesh.numberofvertices,num_controls])/(md,fieldname='inversion.max_parameters',size=[md.mesh.numberofvertices,num_controls])/g' ./classes/m1qn3inversion.py:
+sed -i 's/(md,'fieldname','inversion.thickness_obs','size',[md.mesh.numberofvertices],'NaN',1)/(md,fieldname='inversion.thickness_obs',size=[md.mesh.numberofvertices],NaN=1)/g' ./classes/m1qn3inversion.py:
+sed -i 's/(md,'fieldname','inversion.vx_obs','size',[md.mesh.numberofvertices],'NaN',1)/(md,fieldname='inversion.vx_obs',size=[md.mesh.numberofvertices],NaN=1)/g' ./classes/m1qn3inversion.py:
+sed -i 's/(md,'fieldname','inversion.vy_obs','size',[md.mesh.numberofvertices],'NaN',1)/(md,fieldname='inversion.vy_obs',size=[md.mesh.numberofvertices],NaN=1)/g' ./classes/m1qn3inversion.py:
+sed -i 's///g' ./classes/masstransport.py:from checkfield import checkfield
+sed -i 's/(md,'fieldname','masstransport.spcthickness','timeseries',1)/(md,fieldname='masstransport.spcthickness',timeseries=1)/g' ./classes/masstransport.py:
+sed -i 's/(md,'fieldname','masstransport.isfreesurface','values',[0,1])/(md,fieldname='masstransport.isfreesurface',values=[0,1])/g' ./classes/masstransport.py:
+sed -i 's/(md,'fieldname','masstransport.hydrostatic_adjustment','values',['Absolute','Incremental'])/(md,fieldname='masstransport.hydrostatic_adjustment',values=['Absolute','Incremental'])/g' ./classes/masstransport.py:
+sed -i 's/(md,'fieldname','masstransport.stabilization','values',[0,1,2,3,4])/(md,fieldname='masstransport.stabilization',values=[0,1,2,3,4])/g' ./classes/masstransport.py:
+sed -i 's/(md,'fieldname','masstransport.min_thickness','>',0)/(md,fieldname='masstransport.min_thickness',gt=0)/g' ./classes/masstransport.py:
+sed -i 's/(md,'fieldname','masstransport.requested_outputs','stringrow',1)/(md,fieldname='masstransport.requested_outputs','stringrow',1)/g' ./classes/masstransport.py:
+sed -i 's///g' ./classes/mismipbasalforcings.py:from checkfield import checkfield
+sed -i 's/(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'timeseries',1)/(md,fieldname='basalforcings.groundedice_melting_rate',NaN=1,timeseries=1)/g' ./classes/mismipbasalforcings.py:
+sed -i 's/(md,'fieldname','basalforcings.meltrate_factor','>=',0,'numel',[1])/(md,fieldname='basalforcings.meltrate_factor',ge=0,numel=[1])/g' ./classes/mismipbasalforcings.py:
+sed -i 's/(md,'fieldname','basalforcings.threshold_thickness','>=',0,'numel',[1])/(md,fieldname='basalforcings.threshold_thickness',ge=0,numel=[1])/g' ./classes/mismipbasalforcings.py:
+sed -i 's/(md,'fieldname','basalforcings.upperdepth_melt','<=',0,'numel',[1])/(md,fieldname='basalforcings.upperdepth_melt',le=0,numel=[1])/g' ./classes/mismipbasalforcings.py:
+sed -i 's/(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'size',[md.mesh.numberofvertices])/(md,fieldname='basalforcings.groundedice_melting_rate',NaN=1,size=[md.mesh.numberofvertices])/g' ./classes/mismipbasalforcings.py:
+sed -i 's/(md,'fieldname','basalforcings.meltrate_factor','>=',0,'numel',[1])/(md,fieldname='basalforcings.meltrate_factor',ge=0,numel=[1])/g' ./classes/mismipbasalforcings.py:
+sed -i 's/(md,'fieldname','basalforcings.threshold_thickness','>=',0,'numel',[1])/(md,fieldname='basalforcings.threshold_thickness',ge=0,numel=[1])/g' ./classes/mismipbasalforcings.py:
+sed -i 's/(md,'fieldname','basalforcings.upperdepth_melt','<=',0,'numel',[1])/(md,fieldname='basalforcings.upperdepth_melt',le=0,numel=[1])/g' ./classes/mismipbasalforcings.py:
+sed -i 's/(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'timeseries',1)/(md,fieldname='basalforcings.groundedice_melting_rate',NaN=1,timeseries=1)/g' ./classes/mismipbasalforcings.py:
+sed -i 's/(md,'fieldname','basalforcings.meltrate_factor','>=',0,'numel',[1])/(md,fieldname='basalforcings.meltrate_factor',ge=0,numel=[1])/g' ./classes/mismipbasalforcings.py:
+sed -i 's/(md,'fieldname','basalforcings.threshold_thickness','>=',0,'numel',[1])/(md,fieldname='basalforcings.threshold_thickness',ge=0,numel=[1])/g' ./classes/mismipbasalforcings.py:
+sed -i 's/(md,'fieldname','basalforcings.upperdepth_melt','<=',0,'numel',[1])/(md,fieldname='basalforcings.upperdepth_melt',le=0,numel=[1])/g' ./classes/mismipbasalforcings.py:
+sed -i 's/(md,'fieldname','basalforcings.geothermalflux','NaN',1,'timeseries',1,'>=',0)/(md,fieldname='basalforcings.geothermalflux',NaN=1,timeseries=1,ge=0)/g' ./classes/mismipbasalforcings.py:
+sed -i 's///g' ./classes/timestepping.py:from checkfield import checkfield
+sed -i 's/(md,'fieldname','timestepping.start_time','numel',[1],'NaN',1)/(md,fieldname='timestepping.start_time',numel=[1],NaN=1)/g' ./classes/timestepping.py:
+sed -i 's/(md,'fieldname','timestepping.final_time','numel',[1],'NaN',1)/(md,fieldname='timestepping.final_time',numel=[1],NaN=1)/g' ./classes/timestepping.py:
+sed -i 's/(md,'fieldname','timestepping.time_step','numel',[1],'>=',0,'NaN',1)/(md,fieldname='timestepping.time_step',numel=[1],ge=0,NaN=1)/g' ./classes/timestepping.py:
+sed -i 's/(md,'fieldname','timestepping.time_adapt','numel',[1],'values',[0,1])/(md,fieldname='timestepping.time_adapt',numel=[1],values=[0,1])/g' ./classes/timestepping.py:
+sed -i 's/(md,'fieldname','timestepping.cfl_coefficient','numel',[1],'>',0,'<=',1)/(md,fieldname='timestepping.cfl_coefficient',numel=[1],gt=0,le=1)/g' ./classes/timestepping.py:
+sed -i 's/(md,'fieldname','timestepping.interp_forcings','numel',[1],'values',[0,1])/(md,fieldname='timestepping.interp_forcings',numel=[1],values=[0,1])/g' ./classes/timestepping.py:
+sed -i 's///g' ./classes/calving.py:from checkfield import checkfield
+sed -i 's/(md,'fieldname','calving.spclevelset','timeseries',1)/(md,fieldname='calving.spclevelset',timeseries=1)/g' ./classes/calving.py:
+sed -i 's/(md,'fieldname','calving.stabilization','values',[0,1,2]);/(md,fieldname='calving.stabilization',values=[0,1,2]);/g' ./classes/calving.py:
+sed -i 's/(md,'fieldname','calving.calvingrate','>=',0,'timeseries',1,'NaN',1);/(md,fieldname='calving.calvingrate',ge=0,timeseries=1,NaN=1);/g' ./classes/calving.py:
+sed -i 's/(md,'fieldname','calving.meltingrate','>=',0,'timeseries',1,'NaN',1);/(md,fieldname='calving.meltingrate',ge=0,timeseries=1,NaN=1);/g' ./classes/calving.py:
+sed -i 's///g' ./classes/SMBcomponents.py:from checkfield import *
+sed -i 's/(md,'fieldname','smb.accumulation','timeseries',1,'NaN',1)/(md,fieldname='smb.accumulation',timeseries=1,NaN=1)/g' ./classes/SMBcomponents.py:
+sed -i 's/(md,'fieldname','smb.accumulation','size',[md.mesh.numberofvertices],'NaN',1)/(md,fieldname='smb.accumulation',size=[md.mesh.numberofvertices],NaN=1)/g' ./classes/SMBcomponents.py:
+sed -i 's/(md,'fieldname','smb.runoff','timeseries',1,'NaN',1)/(md,fieldname='smb.runoff',timeseries=1,NaN=1)/g' ./classes/SMBcomponents.py:
+sed -i 's/(md,'fieldname','smb.runoff','size',[md.mesh.numberofvertices],'NaN',1)/(md,fieldname='smb.runoff',size=[md.mesh.numberofvertices],NaN=1)/g' ./classes/SMBcomponents.py:
+sed -i 's/(md,'fieldname','smb.evaporation','timeseries',1,'NaN',1)/(md,fieldname='smb.evaporation',timeseries=1,NaN=1)/g' ./classes/SMBcomponents.py:
+sed -i 's/(md,'fieldname','smb.evaporation','size',[md.mesh.numberofvertices],'NaN',1)/(md,fieldname='smb.evaporation',size=[md.mesh.numberofvertices],NaN=1)/g' ./classes/SMBcomponents.py:
+sed -i 's/(md,'fieldname','masstransport.requested_outputs','stringrow',1)/(md,fieldname='masstransport.requested_outputs','stringrow',1)/g' ./classes/SMBcomponents.py:
+sed -i 's///g' ./classes/flaim.py:from checkfield import checkfield
+sed -i 's/(md,'fieldname','flaim.tracks','file',1)/(md,fieldname='flaim.tracks','file',1)/g' ./classes/flaim.py:
+sed -i 's/(md,'fieldname','flaim.targets','file',1)/(md,fieldname='flaim.targets','file',1)/g' ./classes/flaim.py:
+sed -i 's/(md,'fieldname','flaim.criterion',numel=[md.mesh.numberofvertices,md.mesh.numberofelements])/(md,fieldname='flaim.criterion','numel',[md.mesh.numberofvertices,md.mesh.numberofelements])/g' ./classes/flaim.py:
+sed -i 's/(md,'fieldname',"autodiff.independents[%d].fov_forward_indices" % i,'>=',1,'<=',self.nods,'size',[float('NaN'),1])/(md,fieldname="autodiff.independents[%d].fov_forward_indices" % i,ge=1,le=self.nods,size=[float('NaN'),1])/g' ./classes/independent.py:
+sed -i 's///g' ./classes/calvinglevermann.py:from checkfield import checkfield
+sed -i 's/(md,'fieldname','calving.spclevelset','timeseries',1)/(md,fieldname='calving.spclevelset',timeseries=1)/g' ./classes/calvinglevermann.py:
+sed -i 's/(md,'fieldname','calving.stabilization','values',[0,1,2]);/(md,fieldname='calving.stabilization',values=[0,1,2]);/g' ./classes/calvinglevermann.py:
+sed -i 's/(md,'fieldname','calving.coeff','size',[md.mesh.numberofvertices],'>',0)/(md,fieldname='calving.coeff',size=[md.mesh.numberofvertices],gt=0)/g' ./classes/calvinglevermann.py:
+sed -i 's/(md,'fieldname','calving.meltingrate','NaN',1,'size',[md.mesh.numberofvertices],'>=',0)/(md,fieldname='calving.meltingrate',NaN=1,size=[md.mesh.numberofvertices],ge=0)/g' ./classes/calvinglevermann.py:
+sed -i 's///g' ./classes/steadystate.py:from checkfield import checkfield
+sed -i 's/(md,'fieldname','steadystate.requested_outputs','stringrow',1)/(md,fieldname='steadystate.requested_outputs','stringrow',1)/g' ./classes/steadystate.py:
+sed -i 's///g' ./classes/SMBmeltcomponents.py:from checkfield import *
+sed -i 's/(md,'fieldname','smb.accumulation','timeseries',1,'NaN',1)/(md,fieldname='smb.accumulation',timeseries=1,NaN=1)/g' ./classes/SMBmeltcomponents.py:
+sed -i 's/(md,'fieldname','smb.accumulation','size',[md.mesh.numberofvertices],'NaN',1)/(md,fieldname='smb.accumulation',size=[md.mesh.numberofvertices],NaN=1)/g' ./classes/SMBmeltcomponents.py:
+sed -i 's/(md,'fieldname','smb.melt','timeseries',1,'NaN',1)/(md,fieldname='smb.melt',timeseries=1,NaN=1)/g' ./classes/SMBmeltcomponents.py:
+sed -i 's/(md,'fieldname','smb.melt','size',[md.mesh.numberofvertices],'NaN',1)/(md,fieldname='smb.melt',size=[md.mesh.numberofvertices],NaN=1)/g' ./classes/SMBmeltcomponents.py:
+sed -i 's/(md,'fieldname','smb.refreeze','timeseries',1,'NaN',1)/(md,fieldname='smb.refreeze',timeseries=1,NaN=1)/g' ./classes/SMBmeltcomponents.py:
+sed -i 's/(md,'fieldname','smb.refreeze','size',[md.mesh.numberofvertices],'NaN',1)/(md,fieldname='smb.refreeze',size=[md.mesh.numberofvertices],NaN=1)/g' ./classes/SMBmeltcomponents.py:
+sed -i 's/(md,'fieldname','smb.evaporation','timeseries',1,'NaN',1)/(md,fieldname='smb.evaporation',timeseries=1,NaN=1)/g' ./classes/SMBmeltcomponents.py:
+sed -i 's/(md,'fieldname','smb.evaporation','size',[md.mesh.numberofvertices],'NaN',1)/(md,fieldname='smb.evaporation',size=[md.mesh.numberofvertices],NaN=1)/g' ./classes/SMBmeltcomponents.py:
+sed -i 's/(md,'fieldname','masstransport.requested_outputs','stringrow',1)/(md,fieldname='masstransport.requested_outputs','stringrow',1)/g' ./classes/SMBmeltcomponents.py:
+sed -i 's///g' ./classes/matdamageice.py:from checkfield import checkfield
+sed -i 's/(md,'fieldname','materials.rho_ice','>',0)/(md,fieldname='materials.rho_ice',gt=0)/g' ./classes/matdamageice.py:
+sed -i 's/(md,'fieldname','materials.rho_water','>',0)/(md,fieldname='materials.rho_water',gt=0)/g' ./classes/matdamageice.py:
+sed -i 's/(md,'fieldname','materials.rho_freshwater','>',0)/(md,fieldname='materials.rho_freshwater',gt=0)/g' ./classes/matdamageice.py:
+sed -i 's/(md,'fieldname','materials.mu_water','>',0)/(md,fieldname='materials.mu_water',gt=0)/g' ./classes/matdamageice.py:
+sed -i 's/(md,'fieldname','materials.rheology_B','>',0,'size',[md.mesh.numberofvertices])/(md,fieldname='materials.rheology_B',gt=0,size=[md.mesh.numberofvertices])/g' ./classes/matdamageice.py:
+sed -i 's/(md,'fieldname','materials.rheology_n','>',0,'size',[md.mesh.numberofelements])/(md,fieldname='materials.rheology_n',gt=0,size=[md.mesh.numberofelements])/g' ./classes/matdamageice.py:
+sed -i 's/(md,'fieldname','materials.rheology_law','values',['None','Cuffey','Paterson','Arrhenius','LliboutryDuval'])/(md,fieldname='materials.rheology_law',values=['None','Cuffey','Paterson','Arrhenius','LliboutryDuval'])/g' ./classes/matdamageice.py:
+sed -i 's/(md,'fieldname','materials.lithosphere_shear_modulus','>',0,numel=[1]);/(md,fieldname='materials.lithosphere_shear_modulus',gt=0,'numel',[1]);/g' ./classes/matdamageice.py:
+sed -i 's/(md,'fieldname','materials.lithosphere_density','>',0,numel=[1]);/(md,fieldname='materials.lithosphere_density',gt=0,'numel',[1]);/g' ./classes/matdamageice.py:
+sed -i 's/(md,'fieldname','materials.mantle_shear_modulus','>',0,numel=[1]);/(md,fieldname='materials.mantle_shear_modulus',gt=0,'numel',[1]);/g' ./classes/matdamageice.py:
+sed -i 's/(md,'fieldname','materials.mantle_density','>',0,'numel',[1]);/(md,fieldname='materials.mantle_density',gt=0,numel=[1]);/g' ./classes/matdamageice.py:
+sed -i 's///g' ./classes/massfluxatgate.py:from checkfield import checkfield
+sed -i 's/(md,'field',self.definitionenum,'values',[Outputdefinition1Enum(),Outputdefinition2Enum(),Outputdefinition3Enum(),Outputdefinition4Enum(),Outputdefinition5Enum(),Outputdefinition6Enum(),Outputdefinition7Enum(),Outputdefinition8Enum(),Outputdefinition9Enum(),Outputdefinition10Enum()])/(md,'field',self.definitionenum,values=[Outputdefinition1Enum(),Outputdefinition2Enum(),Outputdefinition3Enum(),Outputdefinition4Enum(),Outputdefinition5Enum(),Outputdefinition6Enum(),Outputdefinition7Enum(),Outputdefinition8Enum(),Outputdefinition9Enum(),Outputdefinition10Enum()])/g' ./classes/massfluxatgate.py:
+sed -i 's///g' ./classes/gia.py:from checkfield import checkfield
+sed -i 's/(md,'fieldname','gia.mantle_viscosity','NaN',1,'size',[md.mesh.numberofvertices,1],'>',0)/(md,fieldname='gia.mantle_viscosity',NaN=1,size=[md.mesh.numberofvertices,1],gt=0)/g' ./classes/gia.py:
+sed -i 's/(md,'fieldname','gia.lithosphere_thickness','NaN',1,'size',[md.mesh.numberofvertices,1],'>',0)/(md,fieldname='gia.lithosphere_thickness',NaN=1,size=[md.mesh.numberofvertices,1],gt=0)/g' ./classes/gia.py:
+sed -i 's/(md,'fieldname','gia.cross_section_shape','numel',[1],'values',[1,2])/(md,fieldname='gia.cross_section_shape',numel=[1],values=[1,2])/g' ./classes/gia.py:
+sed -i 's///g' ./classes/balancethickness.py:from checkfield import checkfield
+sed -i 's/(md,'fieldname','balancethickness.spcthickness')/(md,fieldname='balancethickness.spcthickness')/g' ./classes/balancethickness.py:
+sed -i 's/(md,'fieldname','balancethickness.thickening_rate','size',[md.mesh.numberofvertices],'NaN',1)/(md,fieldname='balancethickness.thickening_rate',size=[md.mesh.numberofvertices],NaN=1)/g' ./classes/balancethickness.py:
+sed -i 's/(md,'fieldname','balancethickness.stabilization','size',[1],'values',[0,1,2,3])/(md,fieldname='balancethickness.stabilization',size=[1],values=[0,1,2,3])/g' ./classes/balancethickness.py:
+sed -i 's///g' ./classes/SMBgradients.py:from checkfield import checkfield
+sed -i 's/(md,'fieldname','smb.href','timeseries',1,'NaN',1)/(md,fieldname='smb.href',timeseries=1,NaN=1)/g' ./classes/SMBgradients.py:
+sed -i 's/(md,'fieldname','smb.smbref','timeseries',1,'NaN',1)/(md,fieldname='smb.smbref',timeseries=1,NaN=1)/g' ./classes/SMBgradients.py:
+sed -i 's/(md,'fieldname','smb.b_pos','timeseries',1,'NaN',1)/(md,fieldname='smb.b_pos',timeseries=1,NaN=1)/g' ./classes/SMBgradients.py:
+sed -i 's/(md,'fieldname','smb.b_neg','timeseries',1,'NaN',1)/(md,fieldname='smb.b_neg',timeseries=1,NaN=1)/g' ./classes/SMBgradients.py:
+sed -i 's/(md,'fieldname','masstransport.requested_outputs','stringrow',1)/(md,fieldname='masstransport.requested_outputs','stringrow',1)/g' ./classes/SMBgradients.py:
+sed -i 's///g' ./classes/mesh2d.py:from checkfield import checkfield
+sed -i 's/(md,'fieldname','mesh.x','NaN',1,'size',[md.mesh.numberofvertices])/(md,fieldname='mesh.x',NaN=1,size=[md.mesh.numberofvertices])/g' ./classes/mesh2d.py:
+sed -i 's/(md,'fieldname','mesh.y','NaN',1,'size',[md.mesh.numberofvertices])/(md,fieldname='mesh.y',NaN=1,size=[md.mesh.numberofvertices])/g' ./classes/mesh2d.py:
+sed -i 's/(md,'fieldname','mesh.elements','NaN',1,'>',0,'values',numpy.arange(1,md.mesh.numberofvertices+1))/(md,fieldname='mesh.elements',NaN=1,gt=0,values=numpy.arange(1,md.mesh.numberofvertices+1))/g' ./classes/mesh2d.py:
+sed -i 's/(md,'fieldname','mesh.elements','size',[md.mesh.numberofelements,3])/(md,fieldname='mesh.elements',size=[md.mesh.numberofelements,3])/g' ./classes/mesh2d.py:
+sed -i 's/(md,'fieldname','mesh.numberofelements','>',0)/(md,fieldname='mesh.numberofelements',gt=0)/g' ./classes/mesh2d.py:
+sed -i 's/(md,'fieldname','mesh.numberofvertices','>',0)/(md,fieldname='mesh.numberofvertices',gt=0)/g' ./classes/mesh2d.py:
+sed -i 's/(md,'fieldname','mesh.average_vertex_connectivity','>=',9,'message',"'mesh.average_vertex_connectivity' should be at least 9 in 2d")/(md,fieldname='mesh.average_vertex_connectivity',ge=9,'message',"'mesh.average_vertex_connectivity' should be at least 9 in 2d")/g' ./classes/mesh2d.py:
./classes/adinversion.py: +sed -i 's/(md,'fieldname','inversion.max_parameters','size',[md.mesh.numberofvertices, num_controls])/(md,fieldname='inversion.max_parameters',size=[md.mesh.numberofvertices, num_controls])/g' ./classes/adinversion.py: +sed -i 's/(md,'fieldname','inversion.thickness_obs','size',[md.mesh.numberofvertices, 1],float('Nan'),1)/(md,fieldname='inversion.thickness_obs',size=[md.mesh.numberofvertices, 1],float('Nan'),1)/g' ./classes/adinversion.py: +sed -i 's/(md,'fieldname','inversion.surface_obs','size',[md.mesh.numberofvertices, 1], float('Nan'),1)/(md,fieldname='inversion.surface_obs',size=[md.mesh.numberofvertices, 1], float('Nan'),1)/g' ./classes/adinversion.py: +sed -i 's/(md,'fieldname','inversion.thickness_obs','size',[md.mesh.numberofvertices, 1],float('Nan'),1)/(md,fieldname='inversion.thickness_obs',size=[md.mesh.numberofvertices, 1],float('Nan'),1)/g' ./classes/adinversion.py: +sed -i 's/(md,'fieldname','inversion.vx_obs','size',[md.mesh.numberofvertices, 1],float('Nan'),1)/(md,fieldname='inversion.vx_obs',size=[md.mesh.numberofvertices, 1],float('Nan'),1)/g' ./classes/adinversion.py: +sed -i 's/(md,'fieldname','inversion.vy_obs','size',[md.mesh.numberofvertices, 1],float('Nan'),1)/(md,fieldname='inversion.vy_obs',size=[md.mesh.numberofvertices, 1],float('Nan'),1)/g' ./classes/adinversion.py: +sed -i 's///g' ./classes/damage.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','damage.isdamage','numel',[1],'values',[0,1])/(md,fieldname='damage.isdamage',numel=[1],values=[0,1])/g' ./classes/damage.py: +sed -i 's/(md,'fieldname','damage.D','>=',0,'<=',self.max_damage,'size',[md.mesh.numberofvertices])/(md,fieldname='damage.D',ge=0,le=self.max_damage,size=[md.mesh.numberofvertices])/g' ./classes/damage.py: +sed -i 's/(md,'fieldname','damage.max_damage','<',1,'>=',0)/(md,fieldname='damage.max_damage','<',1,ge=0)/g' ./classes/damage.py: +sed -i 's/(md,'fieldname','damage.law','numel',[1],'values',[0,1,2,3])/(md,fieldname='damage.law',numel=[1],values=[0,1,2,3])/g' ./classes/damage.py: +sed -i 's/(md,'fieldname','damage.spcdamage','timeseries',1)/(md,fieldname='damage.spcdamage',timeseries=1)/g' ./classes/damage.py: +sed -i 's/(md,'fieldname','damage.stabilization','numel',[1],'values',[0,1,2,4])/(md,fieldname='damage.stabilization',numel=[1],values=[0,1,2,4])/g' ./classes/damage.py: +sed -i 's/(md,'fieldname','damage.maxiter','>=0',0)/(md,fieldname='damage.maxiter','>=0',0)/g' ./classes/damage.py: +sed -i 's/(md,'fieldname','damage.elementinterp','values',['P1','P2'])/(md,fieldname='damage.elementinterp',values=['P1','P2'])/g' ./classes/damage.py: +sed -i 's/(md,'fieldname','damage.stress_threshold','>=',0)/(md,fieldname='damage.stress_threshold',ge=0)/g' ./classes/damage.py: +sed -i 's/(md,'fieldname','damage.kappa','>',1)/(md,fieldname='damage.kappa',gt=1)/g' ./classes/damage.py: +sed -i 's/(md,'fieldname','damage.healing','>=',0)/(md,fieldname='damage.healing',ge=0)/g' ./classes/damage.py: +sed -i 's/(md,'fieldname','damage.c1','>=',0)/(md,fieldname='damage.c1',ge=0)/g' ./classes/damage.py: +sed -i 's/(md,'fieldname','damage.c2','>=',0)/(md,fieldname='damage.c2',ge=0)/g' ./classes/damage.py: +sed -i 's/(md,'fieldname','damage.c3','>=',0)/(md,fieldname='damage.c3',ge=0)/g' ./classes/damage.py: +sed -i 's/(md,'fieldname','damage.c4','>=',0)/(md,fieldname='damage.c4',ge=0)/g' ./classes/damage.py: +sed -i 's/(md,'fieldname','damage.healing','>=',0)/(md,fieldname='damage.healing',ge=0)/g' ./classes/damage.py: +sed -i 
's/(md,'fieldname','damage.equiv_stress','numel',[1],'values',[0,1])/(md,fieldname='damage.equiv_stress',numel=[1],values=[0,1])/g' ./classes/damage.py: +sed -i 's/(md,'fieldname','damage.requested_outputs','stringrow',1)/(md,fieldname='damage.requested_outputs','stringrow',1)/g' ./classes/damage.py: +sed -i 's///g' ./classes/friction.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','friction.coefficient','timeseries',1,'NaN',1)/(md,fieldname='friction.coefficient',timeseries=1,NaN=1)/g' ./classes/friction.py: +sed -i 's/(md,'fieldname','friction.q','NaN',1,'size',[md.mesh.numberofelements])/(md,fieldname='friction.q',NaN=1,size=[md.mesh.numberofelements])/g' ./classes/friction.py: +sed -i 's/(md,'fieldname','friction.p','NaN',1,'size',[md.mesh.numberofelements])/(md,fieldname='friction.p',NaN=1,size=[md.mesh.numberofelements])/g' ./classes/friction.py: +sed -i 's///g' ./classes/thermal.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','thermal.stabilization','numel',[1],'values',[0,1,2])/(md,fieldname='thermal.stabilization',numel=[1],values=[0,1,2])/g' ./classes/thermal.py: +sed -i 's/(md,'fieldname','thermal.spctemperature','timeseries',1)/(md,fieldname='thermal.spctemperature',timeseries=1)/g' ./classes/thermal.py: +sed -i 's/(md,'fieldname','thermal.spctemperature[numpy.nonzero(numpy.logical_not(numpy.isnan(md.thermal.spctemperature[0:md.mesh.numberofvertices,:])))]','<',md.materials.meltingpoint-md.materials.beta*md.materials.rho_ice*md.constants.g*replicate[pos],'message',"spctemperature should be below the adjusted melting point")/(md,fieldname='thermal.spctemperature[numpy.nonzero(numpy.logical_not(numpy.isnan(md.thermal.spctemperature[0:md.mesh.numberofvertices,:])))]','<',md.materials.meltingpoint-md.materials.beta*md.materials.rho_ice*md.constants.g*replicate[pos],'message',"spctemperature should be below the adjusted melting point")/g' ./classes/thermal.py: +sed -i 's/(md,'fieldname','thermal.isenthalpy','numel',[1],'values',[0,1])/(md,fieldname='thermal.isenthalpy',numel=[1],values=[0,1])/g' ./classes/thermal.py: +sed -i 's/(md,'fieldname','thermal.isdynamicbasalspc','numel',[1],'values',[0,1]);/(md,fieldname='thermal.isdynamicbasalspc',numel=[1],values=[0,1]);/g' ./classes/thermal.py: +sed -i 's/(md,'fieldname','thermal.reltol','>',0.,'message',"reltol must be larger than zero");/(md,fieldname='thermal.reltol',gt=0.,'message',"reltol must be larger than zero");/g' ./classes/thermal.py: +sed -i 's/(md,'fieldname','thermal.requested_outputs','stringrow',1)/(md,fieldname='thermal.requested_outputs','stringrow',1)/g' ./classes/thermal.py: +sed -i 's///g' ./classes/constants.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','constants.g','>',0,'size',[1])/(md,fieldname='constants.g',gt=0,size=[1])/g' ./classes/constants.py: +sed -i 's/(md,'fieldname','constants.yts','>',0,'size',[1])/(md,fieldname='constants.yts',gt=0,size=[1])/g' ./classes/constants.py: +sed -i 's/(md,'fieldname','constants.referencetemperature','size',[1])/(md,fieldname='constants.referencetemperature',size=[1])/g' ./classes/constants.py: +sed -i 's///g' ./classes/SMBforcing.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','smb.mass_balance','timeseries',1,'NaN',1)/(md,fieldname='smb.mass_balance',timeseries=1,NaN=1)/g' ./classes/SMBforcing.py: +sed -i 's/(md,'fieldname','smb.mass_balance','size',[md.mesh.numberofvertices],'NaN',1)/(md,fieldname='smb.mass_balance',size=[md.mesh.numberofvertices],NaN=1)/g' ./classes/SMBforcing.py: +sed -i 
's/(md,'fieldname','masstransport.requested_outputs','stringrow',1)/(md,fieldname='masstransport.requested_outputs','stringrow',1)/g' ./classes/SMBforcing.py: +sed -i 's///g' ./classes/settings.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','settings.results_on_nodes','numel',[1],'values',[0,1])/(md,fieldname='settings.results_on_nodes',numel=[1],values=[0,1])/g' ./classes/settings.py: +sed -i 's/(md,'fieldname','settings.io_gather','numel',[1],'values',[0,1])/(md,fieldname='settings.io_gather',numel=[1],values=[0,1])/g' ./classes/settings.py: +sed -i 's/(md,'fieldname','settings.lowmem','numel',[1],'values',[0,1])/(md,fieldname='settings.lowmem',numel=[1],values=[0,1])/g' ./classes/settings.py: +sed -i 's/(md,'fieldname','settings.output_frequency','numel',[1],'>=',1)/(md,fieldname='settings.output_frequency',numel=[1],ge=1)/g' ./classes/settings.py: +sed -i 's/(md,'fieldname','settings.recording_frequency','numel',[1],'>=',0)/(md,fieldname='settings.recording_frequency',numel=[1],ge=0)/g' ./classes/settings.py: +sed -i 's/(md,'fieldname','settings.waitonlock','numel',[1])/(md,fieldname='settings.waitonlock',numel=[1])/g' ./classes/settings.py: +sed -i 's///g' ./classes/SMBpdd.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','smb.desfac','<=',1,'numel',[1])/(md,fieldname='smb.desfac',le=1,numel=[1])/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.s0p','>=',0,'NaN',1,'size',[md.mesh.numberofvertices,1])/(md,fieldname='smb.s0p',ge=0,NaN=1,size=[md.mesh.numberofvertices,1])/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.s0t','>=',0,'NaN',1,'size',[md.mesh.numberofvertices,1])/(md,fieldname='smb.s0t',ge=0,NaN=1,size=[md.mesh.numberofvertices,1])/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.rlaps','>=',0,'numel',[1])/(md,fieldname='smb.rlaps',ge=0,numel=[1])/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.rlapslgm','>=',0,'numel',[1])/(md,fieldname='smb.rlapslgm',ge=0,numel=[1])/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.monthlytemperatures','NaN',1,'timeseries',1)/(md,fieldname='smb.monthlytemperatures',NaN=1,timeseries=1)/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.precipitation','NaN',1,'timeseries',1)/(md,fieldname='smb.precipitation',NaN=1,timeseries=1)/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.delta18o','NaN',1,'size',[2,numpy.nan],'singletimeseries',1)/(md,fieldname='smb.delta18o',NaN=1,size=[2,numpy.nan],'singletimeseries',1)/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.delta18o_surface','NaN',1,'size',[2,numpy.nan],'singletimeseries',1)/(md,fieldname='smb.delta18o_surface',NaN=1,size=[2,numpy.nan],'singletimeseries',1)/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.temperatures_presentday','size',[md.mesh.numberofvertices+1,12],'NaN',1,'timeseries',1)/(md,fieldname='smb.temperatures_presentday',size=[md.mesh.numberofvertices+1,12],NaN=1,timeseries=1)/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.temperatures_lgm','size',[md.mesh.numberofvertices+1,12],'NaN',1,'timeseries',1)/(md,fieldname='smb.temperatures_lgm',size=[md.mesh.numberofvertices+1,12],NaN=1,timeseries=1)/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.precipitations_presentday','size',[md.mesh.numberofvertices+1,12],'NaN',1,'timeseries',1)/(md,fieldname='smb.precipitations_presentday',size=[md.mesh.numberofvertices+1,12],NaN=1,timeseries=1)/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.precipitations_lgm','size',[md.mesh.numberofvertices+1,12],'NaN',1,'timeseries',1) 
/(md,fieldname='smb.precipitations_lgm','size',[md.mesh.numberofvertices+1,12],NaN=1,timeseries=1) /g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.Tdiff','NaN',1,size=[2,numpy.nan],'singletimeseries',1)/(md,fieldname='smb.Tdiff',NaN=1,'size',[2,numpy.nan],'singletimeseries',1)/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.sealev','NaN',1,size=[2,numpy.nan],'singletimeseries',1)/(md,fieldname='smb.sealev',NaN=1,'size',[2,numpy.nan],'singletimeseries',1)/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.temperatures_presentday',size=[md.mesh.numberofvertices+1,12],'NaN',1,'timeseries',1)/(md,fieldname='smb.temperatures_presentday','size',[md.mesh.numberofvertices+1,12],NaN=1,timeseries=1)/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.temperatures_lgm',size=[md.mesh.numberofvertices+1,12],'NaN',1,timeseries=1)/(md,fieldname='smb.temperatures_lgm','size',[md.mesh.numberofvertices+1,12],NaN=1,'timeseries',1)/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.precipitations_presentday',size=[md.mesh.numberofvertices+1,12],'NaN',1,timeseries=1)/(md,fieldname='smb.precipitations_presentday','size',[md.mesh.numberofvertices+1,12],NaN=1,'timeseries',1)/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.precipitations_lgm',size=[md.mesh.numberofvertices+1,12],'NaN',1,timeseries=1) /(md,fieldname='smb.precipitations_lgm','size',[md.mesh.numberofvertices+1,12],NaN=1,'timeseries',1) /g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.Pfac','NaN',1,size=[2,numpy.nan],'singletimeseries',1)/(md,fieldname='smb.Pfac',NaN=1,'size',[2,numpy.nan],'singletimeseries',1)/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.Tdiff','NaN',1,size=[2,numpy.nan],'singletimeseries',1)/(md,fieldname='smb.Tdiff',NaN=1,'size',[2,numpy.nan],'singletimeseries',1)/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','smb.sealev','NaN',1,size=[2,numpy.nan],'singletimeseries',1)/(md,fieldname='smb.sealev',NaN=1,'size',[2,numpy.nan],'singletimeseries',1)/g' ./classes/SMBpdd.py: +sed -i 's/(md,'fieldname','masstransport.requested_outputs','stringrow',1)/(md,fieldname='masstransport.requested_outputs','stringrow',1)/g' ./classes/SMBpdd.py: +sed -i 's///g' ./classes/toolkits.py:from checkfield import checkfield +sed -i 's///g' ./classes/transient.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','transient.issmb','numel',[1],'values',[0,1])/(md,fieldname='transient.issmb',numel=[1],values=[0,1])/g' ./classes/transient.py: +sed -i 's/(md,'fieldname','transient.ismasstransport','numel',[1],'values',[0,1])/(md,fieldname='transient.ismasstransport',numel=[1],values=[0,1])/g' ./classes/transient.py: +sed -i 's/(md,'fieldname','transient.isstressbalance','numel',[1],'values',[0,1])/(md,fieldname='transient.isstressbalance',numel=[1],values=[0,1])/g' ./classes/transient.py: +sed -i 's/(md,'fieldname','transient.isthermal','numel',[1],'values',[0,1])/(md,fieldname='transient.isthermal',numel=[1],values=[0,1])/g' ./classes/transient.py: +sed -i 's/(md,'fieldname','transient.isgroundingline','numel',[1],'values',[0,1])/(md,fieldname='transient.isgroundingline',numel=[1],values=[0,1])/g' ./classes/transient.py: +sed -i 's/(md,'fieldname','transient.isgia','numel',[1],'values',[0,1])/(md,fieldname='transient.isgia',numel=[1],values=[0,1])/g' ./classes/transient.py: +sed -i 's/(md,'fieldname','transient.isdamageevolution','numel',[1],'values',[0,1])/(md,fieldname='transient.isdamageevolution',numel=[1],values=[0,1])/g' ./classes/transient.py: +sed -i 
's/(md,'fieldname','transient.islevelset','numel',[1],'values',[0,1])/(md,fieldname='transient.islevelset',numel=[1],values=[0,1])/g' ./classes/transient.py: +sed -i 's/(md,'fieldname','transient.ishydrology','numel',[1],'values',[0,1])/(md,fieldname='transient.ishydrology',numel=[1],values=[0,1])/g' ./classes/transient.py: +sed -i 's/(md,'fieldname','transient.iscalving','numel',[1],'values',[0,1]);/(md,'fieldname','transient.iscalving',numel=[1],values=[0,1]);/g' ./classes/transient.py: +sed -i 's/(md,'fieldname','transient.requested_outputs','stringrow',1)/(md,fieldname='transient.requested_outputs','stringrow',1)/g' ./classes/transient.py: +sed -i 's///g' ./classes/basalforcings.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'timeseries',1)/(md,fieldname='basalforcings.groundedice_melting_rate',NaN=1,timeseries=1)/g' ./classes/basalforcings.py: +sed -i 's/(md,'fieldname','basalforcings.floatingice_melting_rate','NaN',1,'timeseries',1)/(md,fieldname='basalforcings.floatingice_melting_rate',NaN=1,timeseries=1)/g' ./classes/basalforcings.py: +sed -i 's/(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,size=[md.mesh.numberofvertices])/(md,fieldname='basalforcings.groundedice_melting_rate',NaN=1,'size',[md.mesh.numberofvertices])/g' ./classes/basalforcings.py: +sed -i 's/(md,'fieldname','basalforcings.floatingice_melting_rate','NaN',1,size=[md.mesh.numberofvertices])/(md,fieldname='basalforcings.floatingice_melting_rate',NaN=1,'size',[md.mesh.numberofvertices])/g' ./classes/basalforcings.py: +sed -i 's/(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'timeseries',1)/(md,fieldname='basalforcings.groundedice_melting_rate',NaN=1,timeseries=1)/g' ./classes/basalforcings.py: +sed -i 's/(md,'fieldname','basalforcings.floatingice_melting_rate','NaN',1,'timeseries',1)/(md,fieldname='basalforcings.floatingice_melting_rate',NaN=1,timeseries=1)/g' ./classes/basalforcings.py: +sed -i 's/(md,'fieldname','basalforcings.geothermalflux','NaN',1,'timeseries',1,'>=',0)/(md,fieldname='basalforcings.geothermalflux',NaN=1,timeseries=1,ge=0)/g' ./classes/basalforcings.py: +sed -i 's///g' ./classes/stressbalance.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','stressbalance.spcvx','timeseries',1)/(md,fieldname='stressbalance.spcvx',timeseries=1)/g' ./classes/stressbalance.py: +sed -i 's/(md,'fieldname','stressbalance.spcvy','timeseries',1)/(md,fieldname='stressbalance.spcvy',timeseries=1)/g' ./classes/stressbalance.py: +sed -i 's/(md,'fieldname','stressbalance.spcvz','timeseries',1)/(md,fieldname='stressbalance.spcvz',timeseries=1)/g' ./classes/stressbalance.py: +sed -i 's/(md,'fieldname','stressbalance.restol',size=[1],'>',0)/(md,fieldname='stressbalance.restol','size',[1],gt=0)/g' ./classes/stressbalance.py: +sed -i 's/(md,'fieldname','stressbalance.reltol',size=[1])/(md,fieldname='stressbalance.reltol','size',[1])/g' ./classes/stressbalance.py: +sed -i 's/(md,'fieldname','stressbalance.abstol',size=[1])/(md,fieldname='stressbalance.abstol','size',[1])/g' ./classes/stressbalance.py: +sed -i 's/(md,'fieldname','stressbalance.isnewton','numel',[1],'values',[0,1,2])/(md,fieldname='stressbalance.isnewton',numel=[1],values=[0,1,2])/g' ./classes/stressbalance.py: +sed -i 's/(md,'fieldname','stressbalance.FSreconditioning',size=[1],'NaN',1)/(md,fieldname='stressbalance.FSreconditioning','size',[1],NaN=1)/g' ./classes/stressbalance.py: +sed -i 
's/(md,'fieldname','stressbalance.viscosity_overshoot',size=[1],'NaN',1)/(md,fieldname='stressbalance.viscosity_overshoot','size',[1],NaN=1)/g' ./classes/stressbalance.py: +sed -i 's/(md,'fieldname','stressbalance.maxiter',size=[1],'>=',1)/(md,fieldname='stressbalance.maxiter','size',[1],ge=1)/g' ./classes/stressbalance.py: +sed -i 's/(md,'fieldname','stressbalance.referential',size=[md.mesh.numberofvertices,6])/(md,fieldname='stressbalance.referential','size',[md.mesh.numberofvertices,6])/g' ./classes/stressbalance.py: +sed -i 's/(md,'fieldname','stressbalance.loadingforce',size=[md.mesh.numberofvertices,3])/(md,fieldname='stressbalance.loadingforce','size',[md.mesh.numberofvertices,3])/g' ./classes/stressbalance.py: +sed -i 's/(md,'fieldname','stressbalance.requested_outputs','stringrow',1);/(md,'fieldname','stressbalance.requested_outputs','stringrow',1);/g' ./classes/stressbalance.py: +sed -i 's///g' ./classes/initialization.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','initialization.vx','NaN',1,size=[md.mesh.numberofvertices])/(md,fieldname='initialization.vx',NaN=1,'size',[md.mesh.numberofvertices])/g' ./classes/initialization.py: +sed -i 's/(md,'fieldname','initialization.vy','NaN',1,size=[md.mesh.numberofvertices])/(md,fieldname='initialization.vy',NaN=1,'size',[md.mesh.numberofvertices])/g' ./classes/initialization.py: +sed -i 's/(md,'fieldname','initialization.vx','NaN',1,size=[md.mesh.numberofvertices])/(md,fieldname='initialization.vx',NaN=1,'size',[md.mesh.numberofvertices])/g' ./classes/initialization.py: +sed -i 's/(md,'fieldname','initialization.vy','NaN',1,size=[md.mesh.numberofvertices])/(md,fieldname='initialization.vy',NaN=1,'size',[md.mesh.numberofvertices])/g' ./classes/initialization.py: +sed -i 's/(md,'fieldname','initialization.vx','NaN',1,size=[md.mesh.numberofvertices])/(md,fieldname='initialization.vx',NaN=1,'size',[md.mesh.numberofvertices])/g' ./classes/initialization.py: +sed -i 's/(md,'fieldname','initialization.vy','NaN',1,size=[md.mesh.numberofvertices])/(md,fieldname='initialization.vy',NaN=1,'size',[md.mesh.numberofvertices])/g' ./classes/initialization.py: +sed -i 's/(md,'fieldname','initialization.vx','NaN',1,size=[md.mesh.numberofvertices])/(md,fieldname='initialization.vx',NaN=1,'size',[md.mesh.numberofvertices])/g' ./classes/initialization.py: +sed -i 's/(md,'fieldname','initialization.vy','NaN',1,size=[md.mesh.numberofvertices])/(md,fieldname='initialization.vy',NaN=1,'size',[md.mesh.numberofvertices])/g' ./classes/initialization.py: +sed -i 's/(md,'fieldname','initialization.temperature','NaN',1,size=[md.mesh.numberofvertices])/(md,fieldname='initialization.temperature',NaN=1,'size',[md.mesh.numberofvertices])/g' ./classes/initialization.py: +sed -i 's/(md,'fieldname','initialization.vz','NaN',1,size=[md.mesh.numberofvertices])/(md,fieldname='initialization.vz',NaN=1,'size',[md.mesh.numberofvertices])/g' ./classes/initialization.py: +sed -i 's/(md,'fieldname','initialization.pressure','NaN',1,'size',[md.mesh.numberofvertices])/(md,fieldname='initialization.pressure',NaN=1,size=[md.mesh.numberofvertices])/g' ./classes/initialization.py: +sed -i 's/(md,'fieldname','initialization.waterfraction','>=',0,'size',[md.mesh.numberofvertices])/(md,fieldname='initialization.waterfraction',ge=0,size=[md.mesh.numberofvertices])/g' ./classes/initialization.py: +sed -i 's/(md,'fieldname','initialization.watercolumn' ,'>=',0,'size',[md.mesh.numberofvertices])/(md,fieldname='initialization.watercolumn' ,ge=0,size=[md.mesh.numberofvertices])/g' 
./classes/initialization.py: +sed -i 's/(md,'fieldname','initialization.watercolumn','NaN',1,'size',[md.mesh.numberofvertices])/(md,fieldname='initialization.watercolumn',NaN=1,size=[md.mesh.numberofvertices])/g' ./classes/initialization.py: +sed -i 's/(md,'fieldname','initialization.sediment_head','NaN',1,'size',[md.mesh.numberofvertices,1])/(md,fieldname='initialization.sediment_head',NaN=1,size=[md.mesh.numberofvertices,1])/g' ./classes/initialization.py: +sed -i 's/(md,'fieldname','initialization.epl_head','NaN',1,'size',[md.mesh.numberofvertices,1])/(md,fieldname='initialization.epl_head',NaN=1,size=[md.mesh.numberofvertices,1])/g' ./classes/initialization.py: +sed -i 's/(md,'fieldname','initialization.epl_thickness','NaN',1,'size',[md.mesh.numberofvertices,1])/(md,fieldname='initialization.epl_thickness',NaN=1,size=[md.mesh.numberofvertices,1])/g' ./classes/initialization.py: +sed -i 's///g' ./classes/hydrologydc.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','hydrology.water_compressibility','numel',[1],'>',0.)/(md,fieldname='hydrology.water_compressibility',numel=[1],gt=0.)/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.isefficientlayer','numel',[1],'values',[0,1])/(md,fieldname='hydrology.isefficientlayer',numel=[1],values=[0,1])/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.penalty_factor','>',0.,'numel',[1])/(md,fieldname='hydrology.penalty_factor',gt=0.,numel=[1])/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.penalty_lock','>=',0.,'numel',[1])/(md,fieldname='hydrology.penalty_lock',ge=0.,numel=[1])/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.rel_tol','>',0.,'numel',[1])/(md,fieldname='hydrology.rel_tol',gt=0.,numel=[1])/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.max_iter','>',0.,'numel',[1])/(md,fieldname='hydrology.max_iter',gt=0.,numel=[1])/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.sedimentlimit_flag','numel',[1],'values',[0,1,2,3])/(md,fieldname='hydrology.sedimentlimit_flag',numel=[1],values=[0,1,2,3])/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.transfer_flag','numel',[1],'values',[0,1])/(md,fieldname='hydrology.transfer_flag',numel=[1],values=[0,1])/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.sedimentlimit','>',0.,'numel',[1])/(md,fieldname='hydrology.sedimentlimit',gt=0.,numel=[1])/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.leakage_factor','>',0.,'numel',[1])/(md,fieldname='hydrology.leakage_factor',gt=0.,numel=[1])/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.basal_moulin_input','NaN',1,'timeseries',1)/(md,fieldname='hydrology.basal_moulin_input',NaN=1,timeseries=1)/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.spcsediment_head','timeseries',1)/(md,fieldname='hydrology.spcsediment_head',timeseries=1)/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.sediment_compressibility','>',0.,'numel',[1])/(md,fieldname='hydrology.sediment_compressibility','>',0.,numel=[1])/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.sediment_porosity',gt=0.,'numel',[1])/(md,fieldname='hydrology.sediment_porosity','>',0.,numel=[1])/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.sediment_thickness',gt=0.,'numel',[1])/(md,fieldname='hydrology.sediment_thickness','>',0.,numel=[1])/g' ./classes/hydrologydc.py: +sed -i 
's/(md,'fieldname','hydrology.sediment_transmitivity','>=',0,'size',[md.mesh.numberofvertices,1])/(md,fieldname='hydrology.sediment_transmitivity',ge=0,size=[md.mesh.numberofvertices,1])/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.spcepl_head','timeseries',1)/(md,fieldname='hydrology.spcepl_head',timeseries=1)/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.mask_eplactive_node','size',[md.mesh.numberofvertices,1],'values',[0,1])/(md,fieldname='hydrology.mask_eplactive_node',size=[md.mesh.numberofvertices,1],values=[0,1])/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.epl_compressibility','>',0.,'numel',[1])/(md,fieldname='hydrology.epl_compressibility',gt=0.,numel=[1])/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.epl_porosity','>',0.,'numel',[1])/(md,fieldname='hydrology.epl_porosity',gt=0.,numel=[1])/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.epl_max_thickness','numel',[1],'>',0.)/(md,fieldname='hydrology.epl_max_thickness',numel=[1],gt=0.)/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.epl_initial_thickness','numel',[1],'>',0.)/(md,fieldname='hydrology.epl_initial_thickness',numel=[1],gt=0.)/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.epl_colapse_thickness','numel',[1],'>',0.)/(md,fieldname='hydrology.epl_colapse_thickness',numel=[1],gt=0.)/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.epl_thick_comp','numel',[1],'values',[0,1])/(md,fieldname='hydrology.epl_thick_comp',numel=[1],values=[0,1])/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.eplflip_lock','>=',0.,'numel',[1])/(md,fieldname='hydrology.eplflip_lock',ge=0.,numel=[1])/g' ./classes/hydrologydc.py: +sed -i 's/(md,'fieldname','hydrology.epl_conductivity','numel',[1],'>',0.)/(md,fieldname='hydrology.epl_conductivity',numel=[1],gt=0.)/g' ./classes/hydrologydc.py: +sed -i 's///g' ./classes/linearbasalforcings.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'timeseries',1)/(md,fieldname='basalforcings.groundedice_melting_rate',NaN=1,timeseries=1)/g' ./classes/linearbasalforcings.py: +sed -i 's/(md,'fieldname','basalforcings.deepwater_melting_rate','>=',0);/(md,'fieldname','basalforcings.deepwater_melting_rate',ge=0);/g' ./classes/linearbasalforcings.py: +sed -i 's/(md,'fieldname','basalforcings.deepwater_elevation','<',md.basalforcings.upperwater_elevation);/(md,'fieldname','basalforcings.deepwater_elevation','<',md.basalforcings.upperwater_elevation);/g' ./classes/linearbasalforcings.py: +sed -i 's/(md,'fieldname','basalforcings.upperwater_elevation','<',0);/(md,'fieldname','basalforcings.upperwater_elevation','<',0);/g' ./classes/linearbasalforcings.py: +sed -i 's/(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'size',[md.mesh.numberofvertices])/(md,fieldname='basalforcings.groundedice_melting_rate',NaN=1,size=[md.mesh.numberofvertices])/g' ./classes/linearbasalforcings.py: +sed -i 's/(md,'fieldname','basalforcings.deepwater_melting_rate','>=',0);/(md,'fieldname','basalforcings.deepwater_melting_rate',ge=0);/g' ./classes/linearbasalforcings.py: +sed -i 's/(md,'fieldname','basalforcings.deepwater_elevation','<',md.basalforcings.upperwater_elevation);/(md,'fieldname','basalforcings.deepwater_elevation','<',md.basalforcings.upperwater_elevation);/g' ./classes/linearbasalforcings.py: +sed -i 
's/(md,'fieldname','basalforcings.upperwater_elevation','<',0);/(md,'fieldname','basalforcings.upperwater_elevation','<',0);/g' ./classes/linearbasalforcings.py: +sed -i 's/(md,'fieldname','basalforcings.groundedice_melting_rate','NaN',1,'timeseries',1)/(md,fieldname='basalforcings.groundedice_melting_rate',NaN=1,timeseries=1)/g' ./classes/linearbasalforcings.py: +sed -i 's/(md,'fieldname','basalforcings.deepwater_melting_rate','>=',0);/(md,'fieldname','basalforcings.deepwater_melting_rate',ge=0);/g' ./classes/linearbasalforcings.py: +sed -i 's/(md,'fieldname','basalforcings.deepwater_elevation','<',md.basalforcings.upperwater_elevation);/(md,'fieldname','basalforcings.deepwater_elevation','<',md.basalforcings.upperwater_elevation);/g' ./classes/linearbasalforcings.py: +sed -i 's/(md,'fieldname','basalforcings.upperwater_elevation','<',0);/(md,'fieldname','basalforcings.upperwater_elevation','<',0);/g' ./classes/linearbasalforcings.py: +sed -i 's/(md,'fieldname','basalforcings.geothermalflux','NaN',1,'timeseries',1,'>=',0)/(md,fieldname='basalforcings.geothermalflux',NaN=1,timeseries=1,ge=0)/g' ./classes/linearbasalforcings.py: +sed -i 's///g' ./classes/outputdefinition.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','outputdefinition.definitions','cell',1)/(md,fieldname='outputdefinition.definitions','cell',1)/g' ./classes/outputdefinition.py: +sed -i 's///g' ./classes/frictionweertman.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','friction.C','timeseries',1,'NaN',1)/(md,fieldname='friction.C',timeseries=1,NaN=1)/g' ./classes/frictionweertman.py: +sed -i 's/(md,'fieldname','friction.m','NaN',1,'size',[md.mesh.numberofelements])/(md,fieldname='friction.m',NaN=1,size=[md.mesh.numberofelements])/g' ./classes/frictionweertman.py: +sed -i 's///g' ./classes/miscellaneous.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','miscellaneous.name','empty',1)/(md,fieldname='miscellaneous.name','empty',1)/g' ./classes/miscellaneous.py: +sed -i 's///g' ./classes/mask.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','mask.ice_levelset' ,'size',[md.mesh.numberofvertices])/(md,fieldname='mask.ice_levelset' ,size=[md.mesh.numberofvertices])/g' ./classes/mask.py: +sed -i 's///g' ./classes/hydrologyshreve.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','hydrology.spcwatercolumn','timeseries',1)/(md,fieldname='hydrology.spcwatercolumn',timeseries=1)/g' ./classes/hydrologyshreve.py: +sed -i 's/(md,'fieldname','hydrology.stabilization','>=',0)/(md,fieldname='hydrology.stabilization',ge=0)/g' ./classes/hydrologyshreve.py: +sed -i 's///g' ./classes/private.py:from checkfield import checkfield +sed -i 's///g' ./classes/rifts.py:from checkfield import checkfield +sed -i 's/(md,'fieldname',"rifts.riftstruct[%d]['fill']" % i,'values',[WaterEnum(),AirEnum(),IceEnum(),MelangeEnum()])/(md,fieldname="rifts.riftstruct[%d]['fill']" % i,values=[WaterEnum(),AirEnum(),IceEnum(),MelangeEnum()])/g' ./classes/rifts.py: +sed -i 's///g' ./classes/groundingline.py:from checkfield import checkfield +sed -i 's/(md,fieldname='groundingline.migration',values=['None','AggressiveMigration','SoftMigration','SubelementMigration','SubelementMigration2','Contact','GroundingOnly'])/(md,fieldname='groundingline.migration',values=['None','AggressiveMigration','SoftMigration','SubelementMigration','SubelementMigration2','Contact','GroundingOnly'])/g' ./classes/groundingline.py: +sed -i 's///g' ./classes/taoinversion.py:from checkfield import checkfield +sed -i 
's/(md,'fieldname','inversion.iscontrol','values',[0, 1])/(md,fieldname='inversion.iscontrol',values=[0, 1])/g' ./classes/taoinversion.py: +sed -i 's/(md,'fieldname','inversion.incomplete_adjoint','values',[0, 1])/(md,fieldname='inversion.incomplete_adjoint',values=[0, 1])/g' ./classes/taoinversion.py: +sed -i 's/(md,'fieldname','inversion.control_parameters','cell',1,'values',supportedcontrols())/(md,fieldname='inversion.control_parameters','cell',1,values=supportedcontrols())/g' ./classes/taoinversion.py: +sed -i 's/(md,'fieldname','inversion.maxsteps','numel',1,'>=',0)/(md,fieldname='inversion.maxsteps',numel=1,ge=0)/g' ./classes/taoinversion.py: +sed -i 's/(md,'fieldname','inversion.maxiter','numel',1,'>=',0)/(md,fieldname='inversion.maxiter',numel=1,ge=0)/g' ./classes/taoinversion.py: +sed -i 's/(md,'fieldname','inversion.fatol','numel',1,'>=',0)/(md,fieldname='inversion.fatol',numel=1,ge=0)/g' ./classes/taoinversion.py: +sed -i 's/(md,'fieldname','inversion.frtol','numel',1,'>=',0)/(md,fieldname='inversion.frtol',numel=1,ge=0)/g' ./classes/taoinversion.py: +sed -i 's/(md,'fieldname','inversion.gatol','numel',1,'>=',0)/(md,fieldname='inversion.gatol',numel=1,ge=0)/g' ./classes/taoinversion.py: +sed -i 's/(md,'fieldname','inversion.grtol','numel',1,'>=',0)/(md,fieldname='inversion.grtol',numel=1,ge=0)/g' ./classes/taoinversion.py: +sed -i 's/(md,'fieldname','inversion.gttol','numel',1,'>=',0)/(md,fieldname='inversion.gttol',numel=1,ge=0)/g' ./classes/taoinversion.py: +sed -i 's/(md,'fieldname','inversion.algorithm','values',{'blmvm','cg','lmvm'})/(md,fieldname='inversion.algorithm',values={'blmvm','cg','lmvm'})/g' ./classes/taoinversion.py: +sed -i 's/(md,'fieldname','inversion.algorithm','values',{'tao_blmvm','tao_cg','tao_lmvm'})/(md,fieldname='inversion.algorithm',values={'tao_blmvm','tao_cg','tao_lmvm'})/g' ./classes/taoinversion.py: +sed -i 's/(md,'fieldname','inversion.cost_functions','size',[1, num_costfunc],'values',supportedcostfunctions())/(md,fieldname='inversion.cost_functions',size=[1, num_costfunc],values=supportedcostfunctions())/g' ./classes/taoinversion.py: +sed -i 's/(md,'fieldname','inversion.cost_functions_coefficients','size',[md.mesh.numberofvertices, num_costfunc],'>=',0)/(md,fieldname='inversion.cost_functions_coefficients',size=[md.mesh.numberofvertices, num_costfunc],ge=0)/g' ./classes/taoinversion.py: +sed -i 's/(md,'fieldname','inversion.min_parameters','size',[md.mesh.numberofvertices, num_controls])/(md,fieldname='inversion.min_parameters',size=[md.mesh.numberofvertices, num_controls])/g' ./classes/taoinversion.py: +sed -i 's/(md,'fieldname','inversion.max_parameters','size',[md.mesh.numberofvertices, num_controls])/(md,fieldname='inversion.max_parameters',size=[md.mesh.numberofvertices, num_controls])/g' ./classes/taoinversion.py: +sed -i 's/(md,'fieldname','inversion.thickness_obs','size',[md.mesh.numberofvertices, 1],'NaN',1)/(md,fieldname='inversion.thickness_obs',size=[md.mesh.numberofvertices, 1],NaN=1)/g' ./classes/taoinversion.py: +sed -i 's/(md,'fieldname','inversion.thickness_obs','size',[md.mesh.numberofvertices, 1],'NaN',1)/(md,fieldname='inversion.thickness_obs',size=[md.mesh.numberofvertices, 1],NaN=1)/g' ./classes/taoinversion.py: +sed -i 's/(md,'fieldname','inversion.vx_obs','size',[md.mesh.numberofvertices, 1],'NaN',1)/(md,fieldname='inversion.vx_obs',size=[md.mesh.numberofvertices, 1],NaN=1)/g' ./classes/taoinversion.py: +sed -i 's/(md,'fieldname','inversion.vy_obs','size',[md.mesh.numberofvertices, 
1],'NaN',1)/(md,fieldname='inversion.vy_obs',size=[md.mesh.numberofvertices, 1],NaN=1)/g' ./classes/taoinversion.py: +sed -i 's///g' ./classes/flowequation.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','flowequation.isSIA','numel',[1],'values',[0,1])/(md,fieldname='flowequation.isSIA',numel=[1],values=[0,1])/g' ./classes/flowequation.py: +sed -i 's/(md,'fieldname','flowequation.isSSA','numel',[1],'values',[0,1])/(md,fieldname='flowequation.isSSA',numel=[1],values=[0,1])/g' ./classes/flowequation.py: +sed -i 's/(md,'fieldname','flowequation.isL1L2','numel',[1],'values',[0,1])/(md,fieldname='flowequation.isL1L2',numel=[1],values=[0,1])/g' ./classes/flowequation.py: +sed -i 's/(md,'fieldname','flowequation.isHO','numel',[1],'values',[0,1])/(md,fieldname='flowequation.isHO',numel=[1],values=[0,1])/g' ./classes/flowequation.py: +sed -i 's/(md,'fieldname','flowequation.isFS','numel',[1],'values',[0,1])/(md,fieldname='flowequation.isFS',numel=[1],values=[0,1])/g' ./classes/flowequation.py: +sed -i 's/(md,'fieldname','flowequation.fe_SSA','values',['P1','P1bubble','P1bubblecondensed','P2','P2bubble'])/(md,fieldname='flowequation.fe_SSA',values=['P1','P1bubble','P1bubblecondensed','P2','P2bubble'])/g' ./classes/flowequation.py: +sed -i 's/(md,'fieldname','flowequation.fe_HO' ,'values',['P1','P1bubble','P1bubblecondensed','P1xP2','P2xP1','P2','P2bubble','P1xP3','P2xP4'])/(md,fieldname='flowequation.fe_HO' ,values=['P1','P1bubble','P1bubblecondensed','P1xP2','P2xP1','P2','P2bubble','P1xP3','P2xP4'])/g' ./classes/flowequation.py: +sed -i 's/(md,'fieldname','flowequation.fe_FS' ,'values',['P1P1','P1P1GLS','MINIcondensed','MINI','TaylorHood','XTaylorHood','OneLayerP4z','CrouzeixRaviart'])/(md,fieldname='flowequation.fe_FS' ,values=['P1P1','P1P1GLS','MINIcondensed','MINI','TaylorHood','XTaylorHood','OneLayerP4z','CrouzeixRaviart'])/g' ./classes/flowequation.py: +sed -i 's/(md,'fieldname','flowequation.borderSSA','size',[md.mesh.numberofvertices],'values',[0,1])/(md,fieldname='flowequation.borderSSA',size=[md.mesh.numberofvertices],values=[0,1])/g' ./classes/flowequation.py: +sed -i 's/(md,'fieldname','flowequation.borderHO','size',[md.mesh.numberofvertices],'values',[0,1])/(md,fieldname='flowequation.borderHO',size=[md.mesh.numberofvertices],values=[0,1])/g' ./classes/flowequation.py: +sed -i 's/(md,'fieldname','flowequation.borderFS','size',[md.mesh.numberofvertices],'values',[0,1])/(md,fieldname='flowequation.borderFS',size=[md.mesh.numberofvertices],values=[0,1])/g' ./classes/flowequation.py: +sed -i 's/(md,'fieldname','flowequation.augmented_lagrangian_r','numel',[1],'>',0.)/(md,fieldname='flowequation.augmented_lagrangian_r',numel=[1],gt=0.)/g' ./classes/flowequation.py: +sed -i 's/(md,'fieldname','flowequation.augmented_lagrangian_rhop','numel',[1],'>',0.)/(md,fieldname='flowequation.augmented_lagrangian_rhop',numel=[1],gt=0.)/g' ./classes/flowequation.py: +sed -i 's/(md,'fieldname','flowequation.augmented_lagrangian_rlambda','numel',[1],'>',0.)/(md,fieldname='flowequation.augmented_lagrangian_rlambda',numel=[1],gt=0.)/g' ./classes/flowequation.py: +sed -i 's/(md,'fieldname','flowequation.augmented_lagrangian_rholambda','numel',[1],'>',0.)/(md,fieldname='flowequation.augmented_lagrangian_rholambda',numel=[1],gt=0.)/g' ./classes/flowequation.py: +sed -i 's/(md,'fieldname','flowequation.XTH_theta','numel',[1],'>=',0.,'<',.5)/(md,fieldname='flowequation.XTH_theta',numel=[1],ge=0.,'<',.5)/g' ./classes/flowequation.py: +sed -i 
's/(md,'fieldname','flowequation.vertex_equation','size',[md.mesh.numberofvertices],'values',[1,2])/(md,fieldname='flowequation.vertex_equation',size=[md.mesh.numberofvertices],values=[1,2])/g' ./classes/flowequation.py: +sed -i 's/(md,'fieldname','flowequation.element_equation','size',[md.mesh.numberofelements],'values',[1,2])/(md,fieldname='flowequation.element_equation',size=[md.mesh.numberofelements],'values',[1,2])/g' ./classes/flowequation.py: +sed -i 's/(md,'fieldname','flowequation.vertex_equation','size',[md.mesh.numberofvertices],values=numpy.arange(0,8+1))/(md,fieldname='flowequation.vertex_equation',size=[md.mesh.numberofvertices],'values',numpy.arange(0,8+1))/g' ./classes/flowequation.py: +sed -i 's/(md,'fieldname','flowequation.element_equation','size',[md.mesh.numberofelements],values=numpy.arange(0,8+1))/(md,fieldname='flowequation.element_equation',size=[md.mesh.numberofelements],'values',numpy.arange(0,8+1))/g' ./classes/flowequation.py: +sed -i 's///g' ./classes/geometry.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','geometry.surface' ,'NaN',1,'size',[md.mesh.numberofvertices])/(md,fieldname='geometry.surface' ,NaN=1,size=[md.mesh.numberofvertices])/g' ./classes/geometry.py: +sed -i 's/(md,'fieldname','geometry.base' ,'NaN',1,'size',[md.mesh.numberofvertices])/(md,fieldname='geometry.base' ,NaN=1,size=[md.mesh.numberofvertices])/g' ./classes/geometry.py: +sed -i 's/(md,'fieldname','geometry.thickness','NaN',1,'size',[md.mesh.numberofvertices],'>',0,'timeseries',1)/(md,fieldname='geometry.thickness',NaN=1,size=[md.mesh.numberofvertices],gt=0,timeseries=1)/g' ./classes/geometry.py: +sed -i 's/(md,'fieldname','geometry.bed','NaN',1,'size',[md.mesh.numberofvertices])/(md,fieldname='geometry.bed',NaN=1,size=[md.mesh.numberofvertices])/g' ./classes/geometry.py: +sed -i 's///g' ./classes/inversion.py:from checkfield import checkfield +sed -i 's/(md,'fieldname','inversion.iscontrol','values',[0,1])/(md,fieldname='inversion.iscontrol',values=[0,1])/g' ./classes/inversion.py: +sed -i 's/(md,'fieldname','inversion.incomplete_adjoint','values',[0,1])/(md,fieldname='inversion.incomplete_adjoint',values=[0,1])/g' ./classes/inversion.py: +sed -i 's/(md,'fieldname','inversion.control_parameters','cell',1,'values',supportedcontrols())/(md,fieldname='inversion.control_parameters','cell',1,values=supportedcontrols())/g' ./classes/inversion.py: +sed -i 's/(md,'fieldname','inversion.nsteps','numel',[1],'>=',0)/(md,fieldname='inversion.nsteps',numel=[1],ge=0)/g' ./classes/inversion.py: +sed -i 's/(md,'fieldname','inversion.maxiter_per_step','size',[md.inversion.nsteps],'>=',0)/(md,fieldname='inversion.maxiter_per_step',size=[md.inversion.nsteps],ge=0)/g' ./classes/inversion.py: +sed -i 's/(md,'fieldname','inversion.step_threshold','size',[md.inversion.nsteps])/(md,fieldname='inversion.step_threshold',size=[md.inversion.nsteps])/g' ./classes/inversion.py: +sed -i 's/(md,'fieldname','inversion.cost_functions','size',[num_costfunc],'values',supportedcostfunctions())/(md,fieldname='inversion.cost_functions',size=[num_costfunc],values=supportedcostfunctions())/g' ./classes/inversion.py: +sed -i 's/(md,'fieldname','inversion.cost_functions_coefficients','size',[md.mesh.numberofvertices,num_costfunc],'>=',0)/(md,fieldname='inversion.cost_functions_coefficients',size=[md.mesh.numberofvertices,num_costfunc],ge=0)/g' ./classes/inversion.py: +sed -i 
's/(md,'fieldname','inversion.gradient_scaling','size',[md.inversion.nsteps,num_controls])/(md,fieldname='inversion.gradient_scaling',size=[md.inversion.nsteps,num_controls])/g' ./classes/inversion.py: +sed -i 's/(md,'fieldname','inversion.min_parameters','size',[md.mesh.numberofvertices,num_controls])/(md,fieldname='inversion.min_parameters',size=[md.mesh.numberofvertices,num_controls])/g' ./classes/inversion.py: +sed -i 's/(md,'fieldname','inversion.max_parameters','size',[md.mesh.numberofvertices,num_controls])/(md,fieldname='inversion.max_parameters',size=[md.mesh.numberofvertices,num_controls])/g' ./classes/inversion.py: +sed -i 's/(md,'fieldname','inversion.thickness_obs','size',[md.mesh.numberofvertices],'NaN',1)/(md,fieldname='inversion.thickness_obs',size=[md.mesh.numberofvertices],NaN=1)/g' ./classes/inversion.py: +sed -i 's/(md,'fieldname','inversion.vx_obs','size',[md.mesh.numberofvertices],'NaN',1)/(md,fieldname='inversion.vx_obs',size=[md.mesh.numberofvertices],NaN=1)/g' ./classes/inversion.py: +sed -i 's/(md,'fieldname','inversion.vy_obs','size',[md.mesh.numberofvertices],'NaN',1)/(md,fieldname='inversion.vy_obs',size=[md.mesh.numberofvertices],NaN=1)/g' ./classes/inversion.py: +sed -i 's///g' ./classes/qmu.py:from checkfield import checkfield +sed -i 's///g' ./classes/mesh3dprisms.py:from checkfield import * +sed -i 's/(md,'fieldname','mesh.x','NaN',1,'size',[md.mesh.numberofvertices])/(md,fieldname='mesh.x',NaN=1,size=[md.mesh.numberofvertices])/g' ./classes/mesh3dprisms.py: +sed -i 's/(md,'fieldname','mesh.y','NaN',1,'size',[md.mesh.numberofvertices])/(md,fieldname='mesh.y',NaN=1,size=[md.mesh.numberofvertices])/g' ./classes/mesh3dprisms.py: +sed -i 's/(md,'fieldname','mesh.z','NaN',1,'size',[md.mesh.numberofvertices])/(md,fieldname='mesh.z',NaN=1,size=[md.mesh.numberofvertices])/g' ./classes/mesh3dprisms.py: +sed -i 's/(md,'fieldname','mesh.elements','NaN',1,'>',0,'values',numpy.arange(1,md.mesh.numberofvertices+1))/(md,fieldname='mesh.elements',NaN=1,gt=0,values=numpy.arange(1,md.mesh.numberofvertices+1))/g' ./classes/mesh3dprisms.py: +sed -i 's/(md,'fieldname','mesh.elements','size',[md.mesh.numberofelements,6])/(md,fieldname='mesh.elements',size=[md.mesh.numberofelements,6])/g' ./classes/mesh3dprisms.py: +sed -i 's/(md,'fieldname','mesh.numberoflayers','>=',0)/(md,fieldname='mesh.numberoflayers',ge=0)/g' ./classes/mesh3dprisms.py: +sed -i 's/(md,'fieldname','mesh.numberofelements','>',0)/(md,fieldname='mesh.numberofelements',gt=0)/g' ./classes/mesh3dprisms.py: +sed -i 's/(md,'fieldname','mesh.numberofvertices','>',0)/(md,fieldname='mesh.numberofvertices',gt=0)/g' ./classes/mesh3dprisms.py: +sed -i 's/(md,'fieldname','mesh.vertexonbase','size',[md.mesh.numberofvertices],'values',[0,1])/(md,fieldname='mesh.vertexonbase',size=[md.mesh.numberofvertices],values=[0,1])/g' ./classes/mesh3dprisms.py: +sed -i 's/(md,'fieldname','mesh.vertexonsurface','size',[md.mesh.numberofvertices],'values',[0,1])/(md,fieldname='mesh.vertexonsurface',size=[md.mesh.numberofvertices],values=[0,1])/g' ./classes/mesh3dprisms.py: +sed -i 's/(md,'fieldname','mesh.average_vertex_connectivity','>=',24,'message',"'mesh.average_vertex_connectivity' should be at least 24 in 3d")/(md,fieldname='mesh.average_vertex_connectivity',ge=24,'message',"'mesh.average_vertex_connectivity' should be at least 24 in 3d")/g' ./classes/mesh3dprisms.py: +sed -i 's///g' ./consistency/checkfield.py:def checkfield(md,**kwargs): + Index: ../trunk-jpl/src/py3/mech/damagefrominversion.py 
===================================================================
--- ../trunk-jpl/src/py3/mech/damagefrominversion.py (revision 0)
+++ ../trunk-jpl/src/py3/mech/damagefrominversion.py (revision 19895)
@@ -0,0 +1,44 @@
+import numpy as npy
+
+def damagefrominversion(md):
+    '''
+    compute ice shelf damage from inversion results
+
+    This routine computes damage based on the analytical formalism of Borstad et
+    al. (2013, The Cryosphere). The model must contain inversion results for
+    ice rigidity. Ice rigidity B is assumed to be parameterized by the ice
+    temperature in md.materials.rheology_B.
+
+    Usage:
+        damage=damagefrominversion(md)
+    '''
+
+    # check inputs
+    if not hasattr(md.results,'strainrate'):
+        raise Exception('md.results.strainrate is not present. Calculate using md=mechanicalproperties(md,vx,vy)')
+    if not '2d' in md.mesh.__doc__:
+        raise Exception('only 2d (planview) model supported currently')
+    if any(md.flowequation.element_equation!=2):
+        print('Warning: the model has some non-SSA elements. These will be treated like SSA elements')
+    if npy.ndim(md.results.StressbalanceSolution.MaterialsRheologyBbar)==2:
+        Bi=md.results.StressbalanceSolution.MaterialsRheologyBbar.reshape(-1,)
+    else:
+        Bi=md.results.StressbalanceSolution.MaterialsRheologyBbar
+    if npy.ndim(md.materials.rheology_B)==2:
+        BT=md.materials.rheology_B.reshape(-1,)
+    else:
+        BT=md.materials.rheology_B
+
+    damage=npy.zeros_like(Bi)
+
+    # Damage where Bi softer than B(T): D=1-Bi/B(T), per Borstad et al. (2013)
+    pos=npy.nonzero(Bi<BT)
+    damage[pos]=1.-Bi[pos]/BT[pos]
+
+    return damage
Index: ../trunk-jpl/src/py3/mech/thomasparams.py
===================================================================
--- ../trunk-jpl/src/py3/mech/thomasparams.py (revision 0)
+++ ../trunk-jpl/src/py3/mech/thomasparams.py (revision 19895)
+    pos=npy.nonzero(npy.logical_and(e1<0,e2>0)) # longitudinal compression and lateral tension
+    a[pos]=e1[pos]/e2[pos]
+    ex[pos]=e2[pos]
+    pos2=npy.nonzero(npy.logical_and(npy.logical_and(e1<0,e2<0),npy.abs(e1)<npy.abs(e2))) # biaxial compression
+    a[pos2]=e1[pos2]/e2[pos2]
+    ex[pos2]=e2[pos2]
+    pos3=npy.nonzero(npy.logical_and(npy.logical_and(e1>0,e2>0),npy.abs(e1)<npy.abs(e2))) # biaxial tension
+    a[pos3]=e1[pos3]/e2[pos3]
+    ex[pos3]=e2[pos3]
+
+    # alpha within 1e-3 of -2 would blow up theta below through the (2+a)**n denominator
+    pos=npy.nonzero(npy.abs(a+2)<1e-3)[0]
+    if len(pos)>0:
+        print('Warning: ', len(pos), ' vertices have alpha within 1e-3 of -2')
+        a[pos]=-2+1e-3
+
+    if eq=='Weertman1D':
+        theta=1./8
+        a=npy.zeros((md.mesh.numberofvertices,))
+    elif eq=='Weertman2D':
+        theta=1./9
+        a=npy.ones((md.mesh.numberofvertices,))
+    elif eq=='Thomas':
+        theta=((1.+a+a**2+b**2)**((n-1.)/2.))/(npy.abs(2.+a)**n)
+    else:
+        raise ValueError('argument passed to "eq" not valid')
+
+    alpha=a
+    beta=b
+
+    return alpha,beta,theta,ex
Index: ../trunk-jpl/src/py3/mech/analyticaldamage.py
===================================================================
--- ../trunk-jpl/src/py3/mech/analyticaldamage.py (revision 0)
+++ ../trunk-jpl/src/py3/mech/analyticaldamage.py (revision 19895)
@@ -0,0 +1,106 @@
+import numpy as npy
+from averaging import averaging
+#from plotmodel import plotmodel
+from thomasparams import thomasparams
+
+def analyticaldamage(md,**kwargs):
+    '''
+    ANALYTICALDAMAGE - compute damage for an ice shelf
+
+    This routine computes damage as a function of water/ice
+    material properties, ice thickness, strain rate, and ice
+    rigidity. The model must contain computed strain rates,
+    either from observed or modeled ice velocities.
+
+    Available options:
+        -eq : analytical equation to use in the calculation. Must be one of:
+            'Weertman1D' for a confined ice shelf free to flow in one direction
+            'Weertman2D' for an unconfined ice shelf free to spread in any direction
+            'Thomas' for a 2D ice shelf, taking into account the full strain rate tensor (default)
+        -smoothing : the amount of smoothing to be applied to the strain rate data.
+            Type 'help averaging' for more information on its usage.
+        -coordsys : coordinate system for calculating the strain rate components.
+            Must be one of 'longitudinal' (default), 'principal', or 'xy'.
+        -sigmab : a compressive backstress term to be subtracted from the driving stress
+            in the damage calculation
+
+    Return values:
+        'damage' which is truncated in the range [0,1-1e-9]
+
+        'B' is the rigidity, which is equal to md.materials.rheology_B in areas outside
+        those defined by 'mask.' Within areas defined by 'mask,' where negative damage
+        is inferred, 'B' is updated to make damage equal to zero.
+
+        'backstress' is the inferred backstress necessary to balance the analytical solution
+        (keeping damage within its appropriate limits, e.g. D in [0,1]).
+
+    Usage:
+        damage,B,backstress=analyticaldamage(md,**kwargs)
+
+    Example:
+        damage,B,backstress=analyticaldamage(md,eq='Weertman2D',smoothing=2,sigmab=10e3)
+    '''
+
+    #unpack kwargs; pop() both reads and removes each recognized option
+    eq=kwargs.pop('eq','Thomas')
+    smoothing=kwargs.pop('smoothing',0)
+    coordsys=kwargs.pop('coordsys','longitudinal')
+    sigmab=kwargs.pop('sigmab',0)
+    assert len(kwargs)==0, 'error, unexpected or misspelled kwargs'
+
+    if isinstance(sigmab,(int,float)):
+        sigmab=sigmab*npy.ones((md.mesh.numberofvertices,))
+
+    # check inputs
+    if 'strainrate' not in md.results.__dict__:
+        raise Exception('md.results.strainrate not present. Calculate using md=mechanicalproperties(md,vx,vy)')
+    if not '2d' in md.mesh.__doc__:
+        raise Exception('only 2d (planview) model supported currently')
+    if npy.any(md.flowequation.element_equation!=2):
+        print('Warning: the model has some non-SSA elements. These will be treated like SSA elements')
+
+    a,b,theta,ex=thomasparams(md,eq=eq,smoothing=smoothing,coordsys=coordsys)
+
+    # spreading stress
+    rhoi=md.materials.rho_ice
+    rhow=md.materials.rho_water
+    C=0.5*rhoi*md.constants.g*(1.-rhoi/rhow)
+    T=C*md.geometry.thickness
+
+    # rheology
+    B=md.materials.rheology_B
+    n=averaging(md,md.materials.rheology_n,0)
+
+    D=1.-(1.+a+a**2+b**2)**((n-1.)/(2.*n))/npy.abs(ex)**(1./n)*(T-sigmab)/B/(2.+a)/npy.sign(ex)
+
+    # D>1 where (2+a)*sign(ex)<0, compressive regions where high backstress is needed
+    pos=npy.nonzero(D>1)
+    D[pos]=0
+
+    backstress=npy.zeros((md.mesh.numberofvertices,))
+
+    # backstress to bring D down to one
+    backstress[pos]=T[pos]-(1.-D[pos])*B[pos]*npy.sign(ex[pos])*(2.+a[pos])*npy.abs(ex[pos])**(1./n[pos])/(1.+a[pos]+a[pos]**2)**((n[pos]-1.)/2./n[pos])
+
+    pos=npy.nonzero(D<0)
+    D[pos]=0
+
+    # backstress to bring negative damage to zero
+    backstress[pos]=T[pos]-(1.-D[pos])*B[pos]*npy.sign(ex[pos])*(2.+a[pos])*npy.abs(ex[pos])**(1./n[pos])/(1.+a[pos]+a[pos]**2)**((n[pos]-1.)/2./n[pos])
+
+    pos=npy.nonzero(backstress<0)
+    backstress[pos]=0
+
+    # rigidity from Thomas relation for D=0 and backstress=0
+    B=npy.sign(ex)/(2.+a)*(1.+a+a**2)**((n-1.)/2./n)*T/(npy.abs(ex)**(1./n))
+    pos=npy.nonzero(B<0)
+    B[pos]=md.materials.rheology_B[pos]
+
+    damage=D
+
+    return damage, B, backstress
Index: ../trunk-jpl/src/py3/mech/mechanicalproperties.py
===================================================================
--- ../trunk-jpl/src/py3/mech/mechanicalproperties.py (revision 0)
+++ ../trunk-jpl/src/py3/mech/mechanicalproperties.py (revision 19895)
@@ -0,0 +1,160 @@
+import numpy as npy
+from GetNodalFunctionsCoeff import GetNodalFunctionsCoeff
+from results import results
+from averaging import averaging
+
+def mechanicalproperties(md,vx,vy,**kwargs):
+    """
+    MECHANICALPROPERTIES - compute stress and strain rate for a given velocity
+
+    this routine computes the components of the stress and strain rate tensors
+    and their respective principal directions. the results are stored in md.results
+
+    Usage:
+        md=mechanicalproperties(md,vx,vy)
+
+    Example:
+        md=mechanicalproperties(md,md.initialization.vx,md.initialization.vy)
+        md=mechanicalproperties(md,md.inversion.vx_obs,md.inversion.vy_obs)
+    """
+
+    #some checks
+    if len(vx)!=md.mesh.numberofvertices or len(vy)!=md.mesh.numberofvertices:
+        raise ValueError('the input velocity should be of size '+str(md.mesh.numberofvertices))
+
+    #if md.mesh.dimension!=2:
+    #    raise StandardError('only 2D model supported currently')
+
+    if npy.any(md.flowequation.element_equation!=2):
+        print('Warning: the model has some non-SSA elements. These will be treated like SSA elements')
+
+    #unpack kwargs
+    if 'damage' in kwargs:
+        damage=kwargs.pop('damage')
+        if len(damage)!=md.mesh.numberofvertices:
+            raise ValueError('if damage is supplied it should be of size '+str(md.mesh.numberofvertices))
+        if npy.ndim(damage)==2:
+            damage=damage.reshape(-1,)
+    else:
+        damage=None
+
+    if npy.ndim(vx)==2:
+        vx=vx.reshape(-1,)
+    if npy.ndim(vy)==2:
+        vy=vy.reshape(-1,)
+
+    #initialization
+    numberofelements=md.mesh.numberofelements
+    numberofvertices=md.mesh.numberofvertices
+    index=md.mesh.elements
+    summation=npy.array([[1],[1],[1]])
+    directionsstress=npy.zeros((numberofelements,4))
+    directionsstrain=npy.zeros((numberofelements,4))
+    valuesstress=npy.zeros((numberofelements,2))
+    valuesstrain=npy.zeros((numberofelements,2))
+
+    #compute nodal functions coefficients N(x,y)=alpha x + beta y + gamma
+    alpha,beta=GetNodalFunctionsCoeff(index,md.mesh.x,md.mesh.y)[0:2]
+
+    #compute shear
+    vxlist=vx[index-1]/md.constants.yts
+    vylist=vy[index-1]/md.constants.yts
+    ux=npy.dot((vxlist*alpha),summation).reshape(-1,)
+    uy=npy.dot((vxlist*beta),summation).reshape(-1,)
+    vx=npy.dot((vylist*alpha),summation).reshape(-1,)
+    vy=npy.dot((vylist*beta),summation).reshape(-1,)
+    uyvx=(vx+uy)/2.
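+    # note: with P1 (linear) elements each nodal basis function is N(x,y)=alpha*x+beta*y+gamma,
+    # so the per-element velocity gradients above are alpha/beta-weighted sums over the three
+    # vertices: ux,uy hold du/dx,du/dy, while vx,vy are reused from here on to hold dv/dx,dv/dy,
+    # and uyvx=(dv/dx+du/dy)/2 is the shear component of the strain rate tensor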
+    #clear vxlist vylist
+
+    #compute viscosity
+    nu=npy.zeros((numberofelements,))
+    B_bar=npy.dot(md.materials.rheology_B[index-1],summation/3.).reshape(-1,)
+    power=((md.materials.rheology_n-1.)/(2.*md.materials.rheology_n)).reshape(-1,)
+    second_inv=(ux**2.+vy**2.+((uy+vx)**2.)/4.+ux*vy).reshape(-1,)
+
+    #some corrections
+    location=npy.nonzero(npy.logical_and(second_inv==0,power!=0))
+    nu[location]=1.e18 #arbitrary maximum viscosity to apply where there is no effective shear
+
+    if 'matice' in md.materials.__module__:
+        location=npy.nonzero(second_inv)
+        nu[location]=B_bar[location]/(second_inv[location]**power[location])
+        location=npy.nonzero(npy.logical_and(second_inv==0,power==0))
+        nu[location]=B_bar[location]
+        location=npy.nonzero(npy.logical_and(second_inv==0,power!=0))
+        nu[location]=1.e18
+    elif 'matdamageice' in md.materials.__module__ and damage is not None:
+        print('computing damage-dependent properties!')
+        Zinv=npy.dot(1-damage[index-1],summation/3.).reshape(-1,)
+        location=npy.nonzero(second_inv)
+        nu[location]=Zinv[location]*B_bar[location]/npy.power(second_inv[location],power[location])
+        location=npy.nonzero(npy.logical_and(second_inv==0,power==0))
+        nu[location]=Zinv[location]*B_bar[location]
+        #clear Zinv
+    else:
+        raise Exception('class of md.materials ('+md.materials.__module__+') not recognized or not supported')
+
+    #compute stress
+    tau_xx=nu*ux
+    tau_yy=nu*vy
+    tau_xy=nu*uyvx
+
+    #compute principal properties of stress
+    for i in npy.arange(numberofelements):
+
+        #compute stress and strainrate matrices
+        stress=npy.array([ [tau_xx[i], tau_xy[i]], [tau_xy[i], tau_yy[i]] ])
+        strain=npy.array([ [ux[i], uyvx[i]], [uyvx[i], vy[i]] ])
+
+        #eigenvalues and vectors for stress
+        value,directions=npy.linalg.eig(stress)
+        idx=abs(value).argsort()[::-1] # sort in descending order
+        value=value[idx]
+        directions=directions[:,idx]
+        valuesstress[i,:]=[value[0],value[1]]
+        directionsstress[i,:]=directions.transpose().flatten()
+
+        #eigenvalues and vectors for strain
+        value,directions=npy.linalg.eig(strain)
+        idx=abs(value).argsort()[::-1] # sort in descending order
+        value=value[idx]
+        directions=directions[:,idx]
+        valuesstrain[i,:]=[value[0],value[1]]
+        directionsstrain[i,:]=directions.transpose().flatten()
+
+    ##plug onto the model
+    ##NB: Matlab sorts the eigen values in increasing order, we want the reverse
+    stress=results()
+    stress.xx=tau_xx
+    stress.yy=tau_yy
+    stress.xy=tau_xy
+    stress.principalvalue1=valuesstress[:,0]
+    stress.principalaxis1=directionsstress[:,0:2]
+    stress.principalvalue2=valuesstress[:,1]
+    stress.principalaxis2=directionsstress[:,2:4]
+    stress.effectivevalue=1./npy.sqrt(2.)*npy.sqrt(stress.xx**2+stress.yy**2+2.*stress.xy**2)
+    md.results.stress=stress
+
+    strainrate=results()
+    strainrate.xx=ux*md.constants.yts #strain rate in 1/a instead of 1/s
+    strainrate.yy=vy*md.constants.yts
+    strainrate.xy=uyvx*md.constants.yts
+    strainrate.principalvalue1=valuesstrain[:,0]*md.constants.yts
+    strainrate.principalaxis1=directionsstrain[:,0:2]
+    strainrate.principalvalue2=valuesstrain[:,1]*md.constants.yts
+    strainrate.principalaxis2=directionsstrain[:,2:4]
+    strainrate.effectivevalue=1./npy.sqrt(2.)*npy.sqrt(strainrate.xx**2+strainrate.yy**2+2.*strainrate.xy**2)
+    md.results.strainrate=strainrate
+
+    deviatoricstress=results()
+    deviatoricstress.xx=tau_xx
+    deviatoricstress.yy=tau_yy
+    deviatoricstress.xy=tau_xy
+    deviatoricstress.principalvalue1=valuesstress[:,0]
+    deviatoricstress.principalaxis1=directionsstress[:,0:2]
+    deviatoricstress.principalvalue2=valuesstress[:,1]
+    deviatoricstress.principalaxis2=directionsstress[:,2:4]
+    deviatoricstress.effectivevalue=1./npy.sqrt(2.)*npy.sqrt(deviatoricstress.xx**2+deviatoricstress.yy**2+2.*deviatoricstress.xy**2)
+    md.results.deviatoricstress=deviatoricstress
+
+    return md
Index: ../trunk-jpl/src/py3/mech/steadystateiceshelftemp.py
===================================================================
--- ../trunk-jpl/src/py3/mech/steadystateiceshelftemp.py	(revision 0)
+++ ../trunk-jpl/src/py3/mech/steadystateiceshelftemp.py	(revision 19895)
@@ -0,0 +1,65 @@
+import numpy as npy
+
+def steadystateiceshelftemp(md,surfacetemp,basaltemp):
+    """
+    Compute the depth-averaged steady-state temperature of an ice shelf
+    This routine computes the depth-averaged temperature accounting for vertical advection
+    and diffusion of heat into the base of the ice shelf as a function of surface and basal
+    temperature and the basal melting rate.  Horizontal advection is ignored.
+    The solution is a depth-averaged version of Equation 25 in Holland and Jenkins (1999).
+
+    In addition to supplying md, the surface and basal temperatures of the ice shelf must be supplied in degrees Kelvin.
+
+    The model md must also contain the fields:
+    md.geometry.thickness
+    md.basalforcings.floatingice_melting_rate (positive for melting, negative for freezing)
+
+    Usage:
+        temperature=steadystateiceshelftemp(md,surfacetemp,basaltemp)
+    """
+
+    if len(md.geometry.thickness)!=md.mesh.numberofvertices:
+        raise ValueError('steadystateiceshelftemp error message: thickness should have a length of %d' % md.mesh.numberofvertices)
+
+    #surface and basal temperatures in degrees C
+    if len(surfacetemp)!=md.mesh.numberofvertices:
+        raise ValueError('steadystateiceshelftemp error message: surfacetemp should have a length of %d' % md.mesh.numberofvertices)
+
+    if len(basaltemp)!=md.mesh.numberofvertices:
+        raise ValueError('steadystateiceshelftemp error message: basaltemp should have a length of %d' % md.mesh.numberofvertices)
+
+    # Convert temps to Celsius for Holland and Jenkins (1999) equation
+    Ts=-273.15+surfacetemp
+    Tb=-273.15+basaltemp
+
+    Hi=md.geometry.thickness
+    ki=1.14e-6*md.constants.yts # ice shelf thermal diffusivity from Holland and Jenkins (1999) converted to m^2/yr
+
+    #vertical velocity of ice shelf, calculated from melting rate
+    wi=md.materials.rho_water/md.materials.rho_ice*md.basalforcings.floatingice_melting_rate
+
+    #temperature profile is linear if melting rate is zero, depth-averaged temp is simple average in this case
+    temperature=(Ts+Tb)/2 # where wi~=0
+
+    pos=npy.nonzero(abs(wi)>=1e-4) # to avoid division by zero
+
+    npy.seterr(over='raise',divide='raise') # raise errors if floating point exceptions are encountered in following calculation
+    #calculate depth-averaged temperature (in Celsius)
+    try:
+        temperature[pos]=-( (Tb[pos]-Ts[pos])*ki/wi[pos] + Hi[pos]*Tb[pos] - (Hi[pos]*Ts[pos] + (Tb[pos]-Ts[pos])*ki/wi[pos])*npy.exp(Hi[pos]*wi[pos]/ki) )/( Hi[pos]*(npy.exp(Hi[pos]*wi[pos]/ki)-1))
+    except FloatingPointError:
+        print('steadystateiceshelftemp warning: overflow encountered in multiply/divide/exp, trying another formulation.')
+        temperature[pos]=-( ((Tb[pos]-Ts[pos])*ki/wi[pos] + Hi[pos]*Tb[pos])/npy.exp(Hi[pos]*wi[pos]/ki) - Hi[pos]*Ts[pos] + (Tb[pos]-Ts[pos])*ki/wi[pos])/( Hi[pos]*(1-npy.exp(-Hi[pos]*wi[pos]/ki)))
+
+    #temperature should not be less than surface temp
+    pos=npy.nonzero(temperature<Ts)
+    temperature[pos]=Ts[pos]
+
+    #convert back to degrees Kelvin before returning
+    return temperature+273.15
Index: ../trunk-jpl/src/py3/mech/backstressfrominversion.py
===================================================================
--- ../trunk-jpl/src/py3/mech/backstressfrominversion.py	(revision 0)
+++ ../trunk-jpl/src/py3/mech/backstressfrominversion.py	(revision 19895)
+    pos=npy.nonzero(Bi>md.materials.rheology_B)
+    Bi[pos]=md.materials.rheology_B[pos]
+
+    # analytical backstress solution
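+    # The Thomas/Borstad creep relation gives the stress the shelf can support
+    # through viscous creep alone; whatever part of the spreading stress T it
+    # does not balance is attributed to backstress.  Sketch with hypothetical
+    # numbers (not ISSM data):
+    #    T=100.e3; creep=npy.array([80.e3,110.e3])   # Pa
+    #    bs=T-creep
+    #    bs[npy.nonzero(bs<0)]=0                     # -> [20000. 0.], clipped as below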
+    backstress=T-Bi*npy.sign(ex0)*(2+a0)*npy.abs(ex0)**(1./n)/((1+a0+a0**2+b0**2)**((n-1.)/2./n))
+    backstress[npy.nonzero(backstress<0)]=0
+
+    return backstress
Index: ../trunk-jpl/src/py3/mech/calcbackstress.py
===================================================================
--- ../trunk-jpl/src/py3/mech/calcbackstress.py	(revision 0)
+++ ../trunk-jpl/src/py3/mech/calcbackstress.py	(revision 19895)
@@ -0,0 +1,66 @@
+import numpy as npy
+from averaging import averaging
+from thomasparams import thomasparams
+
+def calcbackstress(md,**kwargs):
+    '''
+    Compute ice shelf backstress.
+
+    This routine computes backstress based on the analytical formalism of
+    Thomas (1973) and Borstad et al. (2013, The Cryosphere) based on the
+    ice rigidity, thickness, the densities of ice and seawater, and
+    (optionally) damage.  Strain rates must also be included, either from
+    observed or modeled velocities.
+
+    Available options:
+        - 'smoothing' : the amount of smoothing to be applied to the strain rate data.
+                Type 'help averaging' for more information on its usage.  Defaults to 0.
+        - 'coordsys' : coordinate system for calculating the strain rate
+            components.  Must be one of:
+            'longitudinal': x axis aligned along a flowline at every point (default)
+            'principal': x axis aligned along maximum principal strain rate at every point
+            'xy': x and y axes same as in polar stereographic projection
+
+    Return values:
+        'backstress' is the inferred backstress based on the analytical
+        solution for ice shelf creep
+
+    Usage:
+        backstress=calcbackstress(md,**kwargs)
+
+    Example:
+        backstress=calcbackstress(md,smoothing=2,coordsys='longitudinal')
+    '''
+
+    # unpack kwargs; pop removes each key, so any leftover key is unexpected
+    smoothing=kwargs.pop('smoothing',0)
+    coordsys=kwargs.pop('coordsys','longitudinal')
+    assert len(kwargs)==0, 'error, unexpected or misspelled kwargs'
+
+    # some checks
+    if not hasattr(md.results,'strainrate'):
+        raise Exception('md.results.strainrate not present.  Calculate using md=mechanicalproperties(md,vx,vy)')
+    if not '2d' in md.mesh.__doc__:
+        raise Exception('only 2d (planview) model supported currently')
+    if any(md.flowequation.element_equation!=2):
+        print('Warning: the model has some non-SSA elements.  These will be treated like SSA elements')
+
+    T=0.5*md.materials.rho_ice*md.constants.g*(1-md.materials.rho_ice/md.materials.rho_water)*md.geometry.thickness
+    n=averaging(md,md.materials.rheology_n,0)
+    B=md.materials.rheology_B
+    if md.damage.isdamage:
+        D=md.damage.D
+    else:
+        D=0.
+
+    a0,b0,theta0,ex0=thomasparams(md,eq='Thomas',smoothing=smoothing,coordsys=coordsys)
+
+    # analytical backstress solution
+    backstress=T-(1.-D)*B*npy.sign(ex0)*(2+a0)*npy.abs(ex0)**(1./n)/((1+a0+a0**2+b0**2)**((n-1.)/2./n))
+    backstress[npy.nonzero(backstress<0)]=0
+
+    return backstress
Index: ../trunk-jpl/src/py3/mech/robintemperature.py
===================================================================
--- ../trunk-jpl/src/py3/mech/robintemperature.py	(revision 0)
+++ ../trunk-jpl/src/py3/mech/robintemperature.py	(revision 19895)
@@ -0,0 +1,42 @@
+import numpy as npy
+from scipy.special import erf
+
+def robintemperature(heatflux,accumrate,thickness,surftemp,z):
+    '''
+    Compute vertical temperature profile of an ice sheet (Robin, 1955)
+
+    This routine computes the vertical temperature profile of an ice sheet
+    according to the solution of Robin (1955), neglecting friction and
+    horizontal advection.
The solution is thus most appropriate at an ice + divide. + + The coordinate system for the solution runs from z=0 at the base + to z=H at the surface of the ice. + + Parameters (SI units): + -heatflux Geothermal heat flux (W m^-2) + -accumrate Surface accumulation rate (m s^-1 ice equivalent) + -thickness Ice thickness (m) + -surftemp Surface temperature (K) + -z Vertical position at which to calculate temperature + (z can be a scalar or a vector) + + Returns a vector the same length as z containing the temperature in K + + Usage: + tprofile=robintemperature(heatflux,accumrate,thickness,surftemp,z) + ''' + + # some constants (from Holland and Jenkins, 1999) + alphaT=1.14e-6 # thermal diffusivity (m^2 s^-1) + c=2009. # specific heat capacity (J kg^-1 K^-1) + rho=917. # ice density (kg m^-3) + + #create vertical coordinate variable + zstar=npy.sqrt(2.*alphaT*thickness/accumrate) + + tprofile=surftemp+npy.sqrt(2.*thickness*npy.pi/accumrate/alphaT)*(-heatflux)/2./rho/c*(erf(z/zstar)-erf(thickness/zstar)) + + return tprofile + # difference between surface and base temperature for check (Cuffey2010 p412): + # print tprofile-surftemp Index: ../trunk-jpl/src/py3/boundaryconditions/SetIceSheetBC.py =================================================================== --- ../trunk-jpl/src/py3/boundaryconditions/SetIceSheetBC.py (revision 0) +++ ../trunk-jpl/src/py3/boundaryconditions/SetIceSheetBC.py (revision 19895) @@ -0,0 +1,59 @@ +import os +import numpy +from ContourToMesh import ContourToMesh + +def SetIceSheetBC(md): + """ + SETICESHEETBC - Create the boundary conditions for stressbalance and thermal models for an IceSheet with no Ice Front + + Usage: + md=SetIceSheetBC(md) + + See also: SETICESHELFBC, SETMARINEICESHEETBC + """ + + #node on Dirichlet + pos=numpy.nonzero(md.mesh.vertexonboundary) + md.stressbalance.spcvx=float('nan')*numpy.ones(md.mesh.numberofvertices) + md.stressbalance.spcvy=float('nan')*numpy.ones(md.mesh.numberofvertices) + md.stressbalance.spcvz=float('nan')*numpy.ones(md.mesh.numberofvertices) + md.stressbalance.spcvx[pos]=0 + md.stressbalance.spcvy[pos]=0 + md.stressbalance.spcvz[pos]=0 + md.stressbalance.referential=float('nan')*numpy.ones((md.mesh.numberofvertices,6)) + md.stressbalance.loadingforce=0*numpy.ones((md.mesh.numberofvertices,3)) + + #Dirichlet Values + if isinstance(md.inversion.vx_obs,numpy.ndarray) and numpy.size(md.inversion.vx_obs,axis=0)==md.mesh.numberofvertices and isinstance(md.inversion.vy_obs,numpy.ndarray) and numpy.size(md.inversion.vy_obs,axis=0)==md.mesh.numberofvertices: + print(" boundary conditions for stressbalance model: spc set as observed velocities") + md.stressbalance.spcvx[pos]=md.inversion.vx_obs[pos] + md.stressbalance.spcvy[pos]=md.inversion.vy_obs[pos] + else: + print(" boundary conditions for stressbalance model: spc set as zero") + + #No ice front -> do nothing + + #Create zeros basalforcings and smb + md.smb.initialize(md) + md.basalforcings.initialize(md) + + #Deal with other boundary conditions + if numpy.all(numpy.isnan(md.balancethickness.thickening_rate)): + md.balancethickness.thickening_rate=numpy.zeros((md.mesh.numberofvertices,1)) + print(" no balancethickness.thickening_rate specified: values set as zero") + md.masstransport.spcthickness=float('nan')*numpy.ones((md.mesh.numberofvertices,1)) + md.balancethickness.spcthickness=float('nan')*numpy.ones((md.mesh.numberofvertices,1)) + md.damage.spcdamage=float('nan')*numpy.ones((md.mesh.numberofvertices,1)) + + if isinstance(md.initialization.temperature,numpy.ndarray) 
and numpy.size(md.initialization.temperature,axis=0)==md.mesh.numberofvertices:
+        md.thermal.spctemperature=float('nan')*numpy.ones((md.mesh.numberofvertices,1))
+        if hasattr(md.mesh,'vertexonsurface'):
+            pos=numpy.nonzero(md.mesh.vertexonsurface)[0]
+            md.thermal.spctemperature[pos]=md.initialization.temperature[pos] #impose observed temperature on surface
+        if not isinstance(md.basalforcings.geothermalflux,numpy.ndarray) or not numpy.size(md.basalforcings.geothermalflux)==md.mesh.numberofvertices:
+            md.basalforcings.geothermalflux=50.*10**-3*numpy.ones((md.mesh.numberofvertices,1)) #50 mW/m^2
+    else:
+        print("   no thermal boundary conditions created: no observed temperature found")
+
+    return md
Index: ../trunk-jpl/src/py3/boundaryconditions/PattynSMB.py
===================================================================
--- ../trunk-jpl/src/py3/boundaryconditions/PattynSMB.py	(revision 0)
+++ ../trunk-jpl/src/py3/boundaryconditions/PattynSMB.py	(revision 19895)
@@ -0,0 +1,50 @@
+import os
+import numpy as npy
+
+def PattynSMB(md,Tf):
+    """
+    PATTYNSMB - Compute SMB over Antarctica (from Pattyn 2006, pg. 18, "GRANTISM: An ExcelTM model for Greenland
+    and Antarctic ice-sheet response to climate changes")
+
+    Usage:
+        md=PattynSMB(md,Tf)
+
+    where Tf is a background forcing temperature ("an anomalous temperature relative to the present conditions")
+
+    See also: SETICESHELFBC, SETMARINEICESHEETBC
+    """
+
+    # Tma    : Mean annual surface temperature in [deg C]
+    # Tms    : Mean summer temperature in [deg C]
+    # h      : Surface/bedrock elevation (I assume in meters but paper does not specify)
+    # phi    : Latitude in degrees SOUTH
+    # lambda : Longitude in degrees WEST
+    # Tf     : Background forcing temperature ("an anomalous temperature relative to the present conditions")
+    # ACCdot : Accumulation rate in units of [m/a] ice equivalent
+    # ABLdot : Surface ablation rate in [m/a] ice equivalent
+
+    #Double check lat and long exist:
+    if npy.any(npy.isnan(md.mesh.lat)):
+        raise IOError('PattynSMB error message: md.mesh.lat field required')
+
+    # Calculate mean annual surface temperature, Eqn (11)
+    # Here, -0.012 is the atmospheric lapse rate from sea level in deg/m.
+    # It is multiplied by surface elevation from sea level
+    Tma = -15.15 - 0.012*md.geometry.surface
+
+    # Calculate summer temperature, Eqn (12)
+    # No melting at PIG in mean conditions - need about 6 degrees Tf to start having a negative yearly SMB
+    Tms = 16.81 - 0.00692*md.geometry.surface - 0.27937*npy.abs(md.mesh.lat) + Tf
+    Tms = Tms[0]
+
+    # Calculate accumulation perturbation with Tf forcing, Eqn (9)
+    ACCdot = 2.5*2**((Tma+Tf)/10.) - 2.5*2**(Tma/10.)
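+    # Hypothetical check of Eqn (9), not ISSM data: with Tma=-20 C and a
+    # forcing Tf=+6 C, ACCdot = 2.5*2**(-1.4) - 2.5*2**(-2.0) ~= 0.32 m/a ice
+    # equivalent, i.e. a warmer background climate increases accumulation here.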
+
+    # Calculate ablation, Eqn (10) (use for both Antarctica & Greenland), max melt is 10 m/a
+    ABLdot=0.*npy.ones(md.mesh.numberofvertices)
+    pos=npy.nonzero(Tms>=0)
+    ABLdot[pos]=npy.minimum(1.4*Tms[pos],10)
+
+    smb=ACCdot-ABLdot
+    return smb[0]
Index: ../trunk-jpl/src/py3/boundaryconditions/SetIceShelfBC.py
===================================================================
--- ../trunk-jpl/src/py3/boundaryconditions/SetIceShelfBC.py	(revision 0)
+++ ../trunk-jpl/src/py3/boundaryconditions/SetIceShelfBC.py	(revision 19895)
@@ -0,0 +1,101 @@
+import os
+import numpy
+from ContourToMesh import ContourToMesh
+import MatlabFuncs as m
+
+def SetIceShelfBC(md,icefrontfile=''):
+    """
+    SETICESHELFBC - Create the boundary conditions for stressbalance and thermal models for an Ice Shelf with Ice Front
+
+    Neumann BC are used on the ice front (an ARGUS contour around the ice front
+    must be given in input)
+    Dirichlet BC are used elsewhere for stressbalance
+
+    Usage:
+        md=SetIceShelfBC(md,icefrontfile)
+
+    Example:
+        md=SetIceShelfBC(md)
+        md=SetIceShelfBC(md,'Front.exp')
+
+    See also: SETICESHEETBC, SETMARINEICESHEETBC
+    """
+
+    #node on Dirichlet (boundary and ~icefront)
+    if icefrontfile:
+        if not os.path.exists(icefrontfile):
+            raise IOError("SetIceShelfBC error message: ice front file '%s' not found." % icefrontfile)
+        [nodeinsideicefront,dum]=ContourToMesh(md.mesh.elements,md.mesh.x,md.mesh.y,icefrontfile,'node',2)
+        nodeonicefront=numpy.logical_and(md.mesh.vertexonboundary,nodeinsideicefront.reshape(-1))
+    else:
+        nodeonicefront=numpy.zeros((md.mesh.numberofvertices),bool)
+
+#    pos=find(md.mesh.vertexonboundary & ~nodeonicefront);
+    pos=numpy.nonzero(numpy.logical_and(md.mesh.vertexonboundary,numpy.logical_not(nodeonicefront)))[0]
+    md.stressbalance.spcvx=float('nan')*numpy.ones(md.mesh.numberofvertices)
+    md.stressbalance.spcvy=float('nan')*numpy.ones(md.mesh.numberofvertices)
+    md.stressbalance.spcvz=float('nan')*numpy.ones(md.mesh.numberofvertices)
+    md.stressbalance.referential=float('nan')*numpy.ones((md.mesh.numberofvertices,6))
+    md.stressbalance.loadingforce=0*numpy.ones((md.mesh.numberofvertices,3))
+
+    #Icefront position
+    pos=numpy.nonzero(nodeonicefront)[0]
+    md.mask.ice_levelset[pos]=0
+
+    #First find segments that are not completely on the front
+    if m.strcmp(md.mesh.elementtype(),'Penta'):
+        numbernodesfront=4
+    elif m.strcmp(md.mesh.elementtype(),'Tria'):
+        numbernodesfront=2
+    else:
+        raise Exception('mesh type not supported yet')
+    if any(md.mask.ice_levelset<=0):
+        values=md.mask.ice_levelset[md.mesh.segments[:,0:-1]-1]
+        segmentsfront=1-values
+        segments=numpy.nonzero(numpy.sum(segmentsfront,axis=1)!=numbernodesfront)[0]
+        #Find all nodes for these segments and spc them
+        pos=md.mesh.segments[segments,0:-1]-1
+    else:
+        pos=numpy.nonzero(md.mesh.vertexonboundary)[0]
+    md.stressbalance.spcvx[pos]=0
+    md.stressbalance.spcvy[pos]=0
+    md.stressbalance.spcvz[pos]=0
+
+    #Dirichlet Values
+    if isinstance(md.inversion.vx_obs,numpy.ndarray) and numpy.size(md.inversion.vx_obs,axis=0)==md.mesh.numberofvertices and isinstance(md.inversion.vy_obs,numpy.ndarray) and numpy.size(md.inversion.vy_obs,axis=0)==md.mesh.numberofvertices:
+        #reshape to rank-2 if necessary to match spc arrays
+        if numpy.ndim(md.inversion.vx_obs)==1:
+            md.inversion.vx_obs=md.inversion.vx_obs.reshape(-1,1)
+        if numpy.ndim(md.inversion.vy_obs)==1:
+            md.inversion.vy_obs=md.inversion.vy_obs.reshape(-1,1)
+        print("   boundary conditions for stressbalance model: spc set as observed velocities")
+        md.stressbalance.spcvx[pos]=md.inversion.vx_obs[pos]
+        md.stressbalance.spcvy[pos]=md.inversion.vy_obs[pos]
+    else:
+        print("   boundary conditions for stressbalance model: spc set as zero")
+
+    #Create zeros basalforcings and smb
+    md.smb.initialize(md)
+    md.basalforcings.initialize(md)
+
+    #Deal with other boundary conditions
+    if numpy.all(numpy.isnan(md.balancethickness.thickening_rate)):
+        md.balancethickness.thickening_rate=numpy.zeros((md.mesh.numberofvertices,1))
+        print("   no balancethickness.thickening_rate specified: values set as zero")
+    md.masstransport.spcthickness=float('nan')*numpy.ones((md.mesh.numberofvertices,1))
+    md.balancethickness.spcthickness=float('nan')*numpy.ones((md.mesh.numberofvertices,1))
+    md.damage.spcdamage=float('nan')*numpy.ones((md.mesh.numberofvertices,1))
+
+    if isinstance(md.initialization.temperature,numpy.ndarray) and numpy.size(md.initialization.temperature,axis=0)==md.mesh.numberofvertices:
+        md.thermal.spctemperature=float('nan')*numpy.ones((md.mesh.numberofvertices,1))
+        if hasattr(md.mesh,'vertexonsurface'):
+            pos=numpy.nonzero(md.mesh.vertexonsurface)[0]
+            md.thermal.spctemperature[pos]=md.initialization.temperature[pos] #impose observed temperature on surface
+        if not isinstance(md.basalforcings.geothermalflux,numpy.ndarray) or not numpy.size(md.basalforcings.geothermalflux,axis=0)==md.mesh.numberofvertices:
+            md.basalforcings.geothermalflux=numpy.zeros((md.mesh.numberofvertices,1))
+    else:
+        print("   no thermal boundary conditions created: no observed temperature found")
+
+    return md
Index: ../trunk-jpl/src/py3/boundaryconditions/SetMarineIceSheetBC.py
===================================================================
--- ../trunk-jpl/src/py3/boundaryconditions/SetMarineIceSheetBC.py	(revision 0)
+++ ../trunk-jpl/src/py3/boundaryconditions/SetMarineIceSheetBC.py	(revision 19895)
@@ -0,0 +1,111 @@
+import os
+import numpy
+from ContourToMesh import ContourToMesh
+import MatlabFuncs as m
+
+def SetMarineIceSheetBC(md,icefrontfile=''):
+    """
+    SETMARINEICESHEETBC - Create the boundary conditions for stressbalance and thermal models for a Marine Ice Sheet with Ice Front
+
+    Neumann BC are used on the ice front (an ARGUS contour around the ice front
+    can be given in input, or it will be deduced as onfloatingice & onboundary)
+    Dirichlet BC are used elsewhere for stressbalance
+
+    Usage:
+        md=SetMarineIceSheetBC(md,icefrontfile)
+        md=SetMarineIceSheetBC(md)
+
+    Example:
+        md=SetMarineIceSheetBC(md,'Front.exp')
+        md=SetMarineIceSheetBC(md)
+
+    See also: SETICESHELFBC, SETICESHEETBC
+    """
+
+    #node on Dirichlet (boundary and ~icefront)
+    if icefrontfile:
+        #User provided Front.exp, use it
+        if not os.path.exists(icefrontfile):
+            raise IOError("SetMarineIceSheetBC error message: ice front file '%s' not found." % icefrontfile)
+        [incontour,dum]=ContourToMesh(md.mesh.elements,md.mesh.x,md.mesh.y,icefrontfile,'node',2)
+        vertexonicefront=numpy.logical_and(md.mesh.vertexonboundary,incontour.reshape(-1))
+    else:
+        #Guess where the ice front is
+        vertexonfloatingice=numpy.zeros((md.mesh.numberofvertices,1))
+        pos=numpy.nonzero(numpy.sum(md.mask.groundedice_levelset[md.mesh.elements-1]<0.,axis=1) >0.)[0]
+        vertexonfloatingice[md.mesh.elements[pos].astype(int)-1]=1.
+        vertexonicefront=numpy.logical_and(numpy.reshape(md.mesh.vertexonboundary,(-1,1)),vertexonfloatingice>0.)
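+        # Sketch of the masking convention used above (illustrative arrays, not
+        # ISSM data): md.mask.groundedice_levelset is >0 on grounded and <0 on
+        # floating vertices, and element tables are stored 1-indexed, e.g.:
+        #    gl=numpy.array([1.,-1.,1.])              # per-vertex levelset
+        #    tri=numpy.array([[1,2,3]])               # one triangle, 1-indexed
+        #    print(numpy.sum(gl[tri-1]<0.,axis=1)>0)  # -> [ True]: touches floating ice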
+
+#    pos=find(md.mesh.vertexonboundary & ~vertexonicefront);
+    pos=numpy.nonzero(numpy.logical_and(md.mesh.vertexonboundary,numpy.logical_not(vertexonicefront)))[0]
+    if not numpy.size(pos):
+        print("SetMarineIceSheetBC warning: ice front all around the glacier, no Dirichlet found. Dirichlet must be added manually.")
+
+    md.stressbalance.spcvx=float('nan')*numpy.ones(md.mesh.numberofvertices)
+    md.stressbalance.spcvy=float('nan')*numpy.ones(md.mesh.numberofvertices)
+    md.stressbalance.spcvz=float('nan')*numpy.ones(md.mesh.numberofvertices)
+    md.stressbalance.referential=float('nan')*numpy.ones((md.mesh.numberofvertices,6))
+    md.stressbalance.loadingforce=0*numpy.ones((md.mesh.numberofvertices,3))
+
+    #Position of ice front
+    pos=numpy.nonzero(vertexonicefront)[0]
+    md.mask.ice_levelset[pos]=0
+
+    #First find segments that are not completely on the front
+    if m.strcmp(md.mesh.elementtype(),'Penta'):
+        numbernodesfront=4
+    elif m.strcmp(md.mesh.elementtype(),'Tria'):
+        numbernodesfront=2
+    else:
+        raise Exception("Mesh type not supported")
+    if any(md.mask.ice_levelset<=0):
+        values=md.mask.ice_levelset[md.mesh.segments[:,0:-1]-1]
+        segmentsfront=1-values
+        segments=numpy.nonzero(numpy.sum(segmentsfront,axis=1)!=numbernodesfront)[0]
+        #Find all nodes for these segments and spc them
+        pos=md.mesh.segments[segments,0:-1]-1
+    else:
+        pos=numpy.nonzero(md.mesh.vertexonboundary)[0]
+    md.stressbalance.spcvx[pos]=0
+    md.stressbalance.spcvy[pos]=0
+    md.stressbalance.spcvz[pos]=0
+
+    #Dirichlet Values
+    if isinstance(md.inversion.vx_obs,numpy.ndarray) and numpy.size(md.inversion.vx_obs,axis=0)==md.mesh.numberofvertices and isinstance(md.inversion.vy_obs,numpy.ndarray) and numpy.size(md.inversion.vy_obs,axis=0)==md.mesh.numberofvertices:
+        print("   boundary conditions for stressbalance model: spc set as observed velocities")
+        md.stressbalance.spcvx[pos]=md.inversion.vx_obs[pos]
+        md.stressbalance.spcvy[pos]=md.inversion.vy_obs[pos]
+    else:
+        print("   boundary conditions for stressbalance model: spc set as zero")
+
+    md.hydrology.spcwatercolumn=numpy.zeros((md.mesh.numberofvertices,2))
+    pos=numpy.nonzero(md.mesh.vertexonboundary)[0]
+    md.hydrology.spcwatercolumn[pos,0]=1
+
+    #Create zeros basalforcings and smb
+    md.smb.initialize(md)
+    md.basalforcings.initialize(md)
+
+    #Deal with other boundary conditions
+    if numpy.all(numpy.isnan(md.balancethickness.thickening_rate)):
+        md.balancethickness.thickening_rate=numpy.zeros((md.mesh.numberofvertices,1))
+        print("   no balancethickness.thickening_rate specified: values set as zero")
+
+    md.masstransport.spcthickness=float('nan')*numpy.ones((md.mesh.numberofvertices,1))
+    md.balancethickness.spcthickness=float('nan')*numpy.ones((md.mesh.numberofvertices,1))
+    md.damage.spcdamage=float('nan')*numpy.ones((md.mesh.numberofvertices,1))
+
+    if isinstance(md.initialization.temperature,numpy.ndarray) and numpy.size(md.initialization.temperature,axis=0)==md.mesh.numberofvertices:
+        md.thermal.spctemperature=float('nan')*numpy.ones((md.mesh.numberofvertices,1))
+        if hasattr(md.mesh,'vertexonsurface'):
+            pos=numpy.nonzero(md.mesh.vertexonsurface)[0]
+            md.thermal.spctemperature[pos]=md.initialization.temperature[pos] #impose observed temperature on surface
+        if not isinstance(md.basalforcings.geothermalflux,numpy.ndarray) or not numpy.size(md.basalforcings.geothermalflux,axis=0)==md.mesh.numberofvertices:
+            md.basalforcings.geothermalflux=numpy.zeros((md.mesh.numberofvertices,1))
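+            # unit check: 50 mW/m^2 = 50.*10.**-3 W/m^2; the line below applies
+            # this default geothermal flux to grounded vertices only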
+            md.basalforcings.geothermalflux[numpy.nonzero(md.mask.groundedice_levelset>0.)]=50.*10.**-3 #50 mW/m^2
+    else:
+        print("   no thermal boundary conditions created: no observed temperature found")
+
+    return md
Index: ../trunk-jpl/src/py3/dev/devpath.py
===================================================================
--- ../trunk-jpl/src/py3/dev/devpath.py	(revision 0)
+++ ../trunk-jpl/src/py3/dev/devpath.py	(revision 19895)
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+import os,sys
+import warnings
+
+#Recover ISSM_DIR and USERNAME
+ISSM_DIR = os.getenv('ISSM_DIRPY3')
+USERNAME = os.getenv('USER')
+JPL_SVN  = os.getenv('JPL_SVN')
+if ISSM_DIR is None:
+    raise NameError('"ISSM_DIRPY3" environment variable is empty! You should define ISSM_DIRPY3 in your .cshrc or .bashrc!')
+if JPL_SVN is None:
+    warnings.warn('"JPL_SVN" environment variable is empty! add it to your .cshrc or .bashrc if you want to do distant computing')
+
+#Go through src/py3 and append any directory that contains a *.py file to PATH
+for root,dirs,files in os.walk(ISSM_DIR+'/src/py3'):
+    if '.svn' in dirs:
+        dirs.remove('.svn')
+    for file in files:
+        if file.find(".py") != -1:
+            if file.find(".pyc") == -1:
+                if root not in sys.path:
+                    sys.path.append(root)
+
+sys.path.append(ISSM_DIR + '/lib')
+sys.path.append(ISSM_DIR + '/src/wrappers/python/.libs')
+# If using clusters, we need to have the path to the cluster settings directory
+if JPL_SVN is not None:
+    if os.path.exists(JPL_SVN + '/usr/' + USERNAME):
+        sys.path.append(JPL_SVN + '/usr/' + USERNAME)
+    else:
+        raise NameError('cluster settings should be in '+ JPL_SVN +'/usr/' + USERNAME)
+
+#Manual imports for commonly used functions
+#from plotmodel import plotmodel
+
+#c = get_ipython().config
+#c.InteractiveShellApp.exec_lines = []
+#c.InteractiveShellApp.exec_lines.append('%load_ext autoreload')
+#c.InteractiveShellApp.exec_lines.append('%autoreload 2')
+#c.InteractiveShellApp.exec_lines.append('print "Warning: disable autoreload in startup.py to improve performance." ')
+
+print("\n  ISSM development path correctly loaded\n\n")
Index: ../trunk-jpl/src/py3/dev/issmversion.py
===================================================================
--- ../trunk-jpl/src/py3/dev/issmversion.py	(revision 0)
+++ ../trunk-jpl/src/py3/dev/issmversion.py	(revision 19895)
@@ -0,0 +1,20 @@
+from IssmConfig import IssmConfig
+
+def issmversion():
+    """
+    ISSMVERSION - display ISSM version
+
+    Usage:
+        issmversion()
+    """
+
+    print(' ')
+    print(IssmConfig('PACKAGE_NAME')[0]+' Version '+IssmConfig('PACKAGE_VERSION')[0])
+    print('(website: '+IssmConfig('PACKAGE_URL')[0]+' contact: '+IssmConfig('PACKAGE_BUGREPORT')[0]+')')
+    print(' ')
+    print('Build date: '+IssmConfig('PACKAGE_BUILD_DATE')[0])
+    print('Copyright (c) 2009-2015 California Institute of Technology')
+    print(' ')
+    print('    to get started type: issmdoc')
+    print(' ')
Index: ../trunk-jpl/src/py3/parameterization/parameterize.py
===================================================================
--- ../trunk-jpl/src/py3/parameterization/parameterize.py	(revision 0)
+++ ../trunk-jpl/src/py3/parameterization/parameterize.py	(revision 19895)
@@ -0,0 +1,33 @@
+import os
+import datetime
+
+def parameterize(md,parametername):
+    """
+    PARAMETERIZE - parameterize a model
+
+    from a parameter python file, start filling in all the model fields that were not
+    filled in by the mesh.py and mask.py model methods.
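+
+    A parameter file is ordinary Python executed against the model; a
+    hypothetical Square.par might contain lines such as (sketch only,
+    assuming the file imports numpy itself):
+
+        import numpy
+        md.geometry.thickness=1000.*numpy.ones((md.mesh.numberofvertices,))
+        md.materials.rheology_n=3.*numpy.ones((md.mesh.numberofelements,))
+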
+    Warning: the parameter file must be runnable in Python
+
+    Usage:
+        md=parameterize(md,parametername)
+
+    Example:
+        md=parameterize(md,'Square.par')
+    """
+
+    #some checks
+    if not os.path.exists(parametername):
+        raise IOError("parameterize error message: file '%s' not found!" % parametername)
+
+    #Try and run parameter file.
+    exec(compile(open(parametername).read(), parametername, 'exec'))
+
+    #Name and notes
+    if not md.miscellaneous.name:
+        md.miscellaneous.name=os.path.basename(parametername).split('.')[0]
+
+    md.miscellaneous.notes="Model created by using parameter file: '%s' on: %s." % (parametername,datetime.datetime.strftime(datetime.datetime.now(),'%c'))
+
+    return md
Index: ../trunk-jpl/src/py3/parameterization/setflowequation.py
===================================================================
--- ../trunk-jpl/src/py3/parameterization/setflowequation.py	(revision 0)
+++ ../trunk-jpl/src/py3/parameterization/setflowequation.py	(revision 19895)
@@ -0,0 +1,289 @@
+import numpy
+from model import model
+from pairoptions import pairoptions
+import MatlabFuncs as m
+import PythonFuncs as p
+from FlagElements import FlagElements
+
+def setflowequation(md,**kwargs):
+    """
+    SETFLOWEQUATION - associate a solution type to each element
+
+    This routine works like plotmodel: it works with an even number of inputs
+    'SIA','SSA','HO','L1L2','FS' and 'fill' are the possible options
+    that must be followed by the corresponding exp file or flags list
+    It can either be a domain file (argus type, .exp extension), or an array of element flags.
+    If user wants every element outside the domain to be flagged, add '~'
+    to the name of the domain file (ex: '~HO.exp');
+    an empty string '' will be considered as an empty domain
+    a string 'all' will be considered as the entire domain
+    You can specify the type of coupling, 'penalties' or 'tiling', to use with the input 'coupling'
+
+    Usage:
+        md=setflowequation(md,**kwargs)
+
+    Example:
+        md=setflowequation(md,HO='HO.exp',fill='SIA',coupling='tiling')
+    """
+
+    #some checks on list of arguments
+    if not isinstance(md,model) or not len(kwargs):
+        raise TypeError("setflowequation error message")
+
+    #process options
+    options=pairoptions(**kwargs)
+    print(options)
+#    options=deleteduplicates(options,1);
+
+    #Find out what kind of coupling to use
+    coupling_method=options.getfieldvalue('coupling','tiling')
+    if coupling_method not in ('tiling','penalties'):
+        raise TypeError("coupling type can only be: tiling or penalties")
+
+    #recover elements distribution
+    SIAflag  = FlagElements(md,options.getfieldvalue('SIA',''))
+    SSAflag  = FlagElements(md,options.getfieldvalue('SSA',''))
+    HOflag   = FlagElements(md,options.getfieldvalue('HO',''))
+    L1L2flag = FlagElements(md,options.getfieldvalue('L1L2',''))
+    FSflag   = FlagElements(md,options.getfieldvalue('FS',''))
+    filltype = options.getfieldvalue('fill','none')
+
+    #Flag the elements that have not been flagged as filltype
+    if filltype=='SIA':
+        SIAflag[numpy.nonzero(numpy.logical_not(p.logical_or_n(SSAflag,HOflag)))]=True
+    elif filltype=='SSA':
+        SSAflag[numpy.nonzero(numpy.logical_not(p.logical_or_n(SIAflag,HOflag,FSflag)))]=True
+    elif filltype=='HO':
+        HOflag[numpy.nonzero(numpy.logical_not(p.logical_or_n(SIAflag,SSAflag,FSflag)))]=True
+
+    #check that each element has at least one flag
+    if not any(SIAflag+SSAflag+L1L2flag+HOflag+FSflag):
+        raise TypeError("elements type not assigned, supported models are 'SIA','SSA','HO' and 'FS'")
+
+    #check that each element has only one flag
+    if any(SIAflag+SSAflag+L1L2flag+HOflag+FSflag>1):
+        print("setflowequation warning message: some elements have several types, higher order type is used for them")
+        SIAflag[numpy.nonzero(numpy.logical_and(SIAflag,SSAflag))]=False
+        SIAflag[numpy.nonzero(numpy.logical_and(SIAflag,HOflag))]=False
+        SSAflag[numpy.nonzero(numpy.logical_and(SSAflag,HOflag))]=False
+
+    #FS can only be used alone for now:
+    if any(FSflag) and any(SIAflag):
+        raise TypeError("FS cannot be used with any other model for now, put FS everywhere")
+
+    #Initialize node fields
+    nodeonSIA=numpy.zeros(md.mesh.numberofvertices,bool)
+    nodeonSIA[md.mesh.elements[numpy.nonzero(SIAflag),:]-1]=True
+    nodeonSSA=numpy.zeros(md.mesh.numberofvertices,bool)
+    nodeonSSA[md.mesh.elements[numpy.nonzero(SSAflag),:]-1]=True
+    nodeonL1L2=numpy.zeros(md.mesh.numberofvertices,bool)
+    nodeonL1L2[md.mesh.elements[numpy.nonzero(L1L2flag),:]-1]=True
+    nodeonHO=numpy.zeros(md.mesh.numberofvertices,bool)
+    nodeonHO[md.mesh.elements[numpy.nonzero(HOflag),:]-1]=True
+    nodeonFS=numpy.zeros(md.mesh.numberofvertices,bool)
+    noneflag=numpy.zeros(md.mesh.numberofelements,bool)
+
+    #First modify FSflag to get rid of elements constrained everywhere (spc + border with HO or SSA)
+    if any(FSflag):
+#        fullspcnodes=double((~isnan(md.stressbalance.spcvx)+~isnan(md.stressbalance.spcvy)+~isnan(md.stressbalance.spcvz))==3 | (nodeonHO & nodeonFS)); %find all the nodes on the boundary of the domain without icefront
+        fullspcnodes=numpy.logical_or(numpy.logical_not(numpy.isnan(md.stressbalance.spcvx)).astype(int)+ \
+            numpy.logical_not(numpy.isnan(md.stressbalance.spcvy)).astype(int)+ \
+            numpy.logical_not(numpy.isnan(md.stressbalance.spcvz)).astype(int)==3, \
+            numpy.logical_and(nodeonHO,nodeonFS)).astype(int)    #find all the nodes on the boundary of the domain without icefront
+#        fullspcelems=double(sum(fullspcnodes(md.mesh.elements),2)==6); %find all the elements on the boundary of the domain without icefront
+        fullspcelems=(numpy.sum(fullspcnodes[md.mesh.elements-1],axis=1)==6).astype(int)    #find all the elements on the boundary of the domain without icefront
+        FSflag[numpy.nonzero(fullspcelems.reshape(-1))]=False
+        nodeonFS[md.mesh.elements[numpy.nonzero(FSflag),:]-1]=True
+
+    #Then complete with NoneApproximation or the other model used if there is no FS
+    if any(FSflag):
+        if any(HOflag):    #fill with HO
+            HOflag[numpy.logical_not(FSflag)]=True
+            nodeonHO[md.mesh.elements[numpy.nonzero(HOflag),:]-1]=True
+        elif any(SSAflag):    #fill with SSA
+            SSAflag[numpy.logical_not(FSflag)]=True
+            nodeonSSA[md.mesh.elements[numpy.nonzero(SSAflag),:]-1]=True
+        else:    #fill with none
+            noneflag[numpy.nonzero(numpy.logical_not(FSflag))]=True
+
+    #Now take care of the coupling between SSA and HO
+    md.stressbalance.vertex_pairing=numpy.array([])
+    nodeonSSAHO=numpy.zeros(md.mesh.numberofvertices,bool)
+    nodeonHOFS=numpy.zeros(md.mesh.numberofvertices,bool)
+    nodeonSSAFS=numpy.zeros(md.mesh.numberofvertices,bool)
+    SSAHOflag=numpy.zeros(md.mesh.numberofelements,bool)
+    SSAFSflag=numpy.zeros(md.mesh.numberofelements,bool)
+    HOFSflag=numpy.zeros(md.mesh.numberofelements,bool)
+    if coupling_method=='penalties':
+        #Create the border nodes between HO and SSA and extrude them
+        numnodes2d=md.mesh.numberofvertices2d
+        numlayers=md.mesh.numberoflayers
+        bordernodes2d=numpy.nonzero(numpy.logical_and(nodeonHO[0:numnodes2d],nodeonSSA[0:numnodes2d]))[0]+1    #Nodes connected to two different types of elements
+
+        #initialize and fill in penalties structure
+        if numpy.all(numpy.logical_not(numpy.isnan(bordernodes2d))):
+            penalties=numpy.zeros((0,2))
+            for i in range(1,numlayers):
+                penalties=numpy.vstack((penalties,numpy.hstack((bordernodes2d.reshape(-1,1),bordernodes2d.reshape(-1,1)+md.mesh.numberofvertices2d*(i)))))
+            md.stressbalance.vertex_pairing=penalties
+
+    elif coupling_method=='tiling':
+        if any(SSAflag) and any(HOflag):    #coupling SSA HO
+            #Find node at the border
+            nodeonSSAHO[numpy.nonzero(numpy.logical_and(nodeonSSA,nodeonHO))]=True
+            #SSA elements in contact with this layer become SSAHO elements
+            matrixelements=m.ismember(md.mesh.elements-1,numpy.nonzero(nodeonSSAHO)[0])
+            commonelements=numpy.sum(matrixelements,axis=1)!=0
+            commonelements[numpy.nonzero(HOflag)]=False    #only one layer: the elements previously in SSA
+            SSAflag[numpy.nonzero(commonelements)]=False    #these elements are now SSAHO elements
+            SSAHOflag[numpy.nonzero(commonelements)]=True
+            nodeonSSA[:]=False
+            nodeonSSA[md.mesh.elements[numpy.nonzero(SSAflag),:]-1]=True
+
+            #rule out elements that don't touch the 2 boundaries
+            pos=numpy.nonzero(SSAHOflag)[0]
+            elist=numpy.zeros(numpy.size(pos),dtype=int)
+            elist = elist + numpy.sum(nodeonSSA[md.mesh.elements[pos,:]-1],axis=1).astype(bool)
+            elist = elist - numpy.sum(nodeonHO[md.mesh.elements[pos,:]-1] ,axis=1).astype(bool)
+            pos1=numpy.nonzero(elist==1)[0]
+            SSAflag[pos[pos1]]=True
+            SSAHOflag[pos[pos1]]=False
+            pos2=numpy.nonzero(elist==-1)[0]
+            HOflag[pos[pos2]]=True
+            SSAHOflag[pos[pos2]]=False
+
+            #Recompute nodes associated to these elements
+            nodeonSSA[:]=False
+            nodeonSSA[md.mesh.elements[numpy.nonzero(SSAflag),:]-1]=True
+            nodeonHO[:]=False
+            nodeonHO[md.mesh.elements[numpy.nonzero(HOflag),:]-1]=True
+            nodeonSSAHO[:]=False
+            nodeonSSAHO[md.mesh.elements[numpy.nonzero(SSAHOflag),:]-1]=True
+
+        elif any(HOflag) and any(FSflag):    #coupling HO FS
+            #Find node at the border
+            nodeonHOFS[numpy.nonzero(numpy.logical_and(nodeonHO,nodeonFS))]=True
+            #FS elements in contact with this layer become HOFS elements
+            matrixelements=m.ismember(md.mesh.elements-1,numpy.nonzero(nodeonHOFS)[0])
+            commonelements=numpy.sum(matrixelements,axis=1)!=0
+            commonelements[numpy.nonzero(HOflag)]=False    #only one layer: the elements previously in FS
+            FSflag[numpy.nonzero(commonelements)]=False    #these elements are now HOFS elements
+            HOFSflag[numpy.nonzero(commonelements)]=True
+            nodeonFS=numpy.zeros(md.mesh.numberofvertices,bool)
+            nodeonFS[md.mesh.elements[numpy.nonzero(FSflag),:]-1]=True
+
+            #rule out elements that don't touch the 2 boundaries
+            pos=numpy.nonzero(HOFSflag)[0]
+            elist=numpy.zeros(numpy.size(pos),dtype=int)
+            elist = elist + numpy.sum(nodeonFS[md.mesh.elements[pos,:]-1],axis=1).astype(bool)
+            elist = elist - numpy.sum(nodeonHO[md.mesh.elements[pos,:]-1],axis=1).astype(bool)
+            pos1=numpy.nonzero(elist==1)[0]
+            FSflag[pos[pos1]]=True
+            HOFSflag[pos[pos1]]=False
+            pos2=numpy.nonzero(elist==-1)[0]
+            HOflag[pos[pos2]]=True
+            HOFSflag[pos[pos2]]=False
+
+            #Recompute nodes associated to these elements
+            nodeonFS[:]=False
+            nodeonFS[md.mesh.elements[numpy.nonzero(FSflag),:]-1]=True
+            nodeonHO[:]=False
+            nodeonHO[md.mesh.elements[numpy.nonzero(HOflag),:]-1]=True
+            nodeonHOFS[:]=False
+            nodeonHOFS[md.mesh.elements[numpy.nonzero(HOFSflag),:]-1]=True
+
+        elif any(FSflag) and any(SSAflag):
+            #Find node at the border
+            nodeonSSAFS[numpy.nonzero(numpy.logical_and(nodeonSSA,nodeonFS))]=True
+            #FS elements in contact with this layer become SSAFS elements
+            matrixelements=m.ismember(md.mesh.elements-1,numpy.nonzero(nodeonSSAFS)[0])
+            commonelements=numpy.sum(matrixelements,axis=1)!=0
+            commonelements[numpy.nonzero(SSAflag)]=False    #only one layer: the elements previously in FS
+            FSflag[numpy.nonzero(commonelements)]=False    #these elements are now SSAFS elements
+            SSAFSflag[numpy.nonzero(commonelements)]=True
+            nodeonFS=numpy.zeros(md.mesh.numberofvertices,bool)
+            nodeonFS[md.mesh.elements[numpy.nonzero(FSflag),:]-1]=True
+
+            #rule out elements that don't touch the 2 boundaries
+            pos=numpy.nonzero(SSAFSflag)[0]
+            elist=numpy.zeros(numpy.size(pos),dtype=int)
+            elist = elist + numpy.sum(nodeonSSA[md.mesh.elements[pos,:]-1],axis=1).astype(bool)
+            elist = elist - numpy.sum(nodeonFS[md.mesh.elements[pos,:]-1] ,axis=1).astype(bool)
+            pos1=numpy.nonzero(elist==1)[0]
+            SSAflag[pos[pos1]]=True
+            SSAFSflag[pos[pos1]]=False
+            pos2=numpy.nonzero(elist==-1)[0]
+            FSflag[pos[pos2]]=True
+            SSAFSflag[pos[pos2]]=False
+
+            #Recompute nodes associated to these elements
+            nodeonSSA[:]=False
+            nodeonSSA[md.mesh.elements[numpy.nonzero(SSAflag),:]-1]=True
+            nodeonFS[:]=False
+            nodeonFS[md.mesh.elements[numpy.nonzero(FSflag),:]-1]=True
+            nodeonSSAFS[:]=False
+            nodeonSSAFS[md.mesh.elements[numpy.nonzero(SSAFSflag),:]-1]=True
+
+        elif any(FSflag) and any(SIAflag):
+            raise TypeError("type of coupling not supported yet")
+
+    #Create SSAHOApproximation where needed
+    md.flowequation.element_equation=numpy.zeros(md.mesh.numberofelements,int)
+    md.flowequation.element_equation[numpy.nonzero(noneflag)]=0
+    md.flowequation.element_equation[numpy.nonzero(SIAflag)]=1
+    md.flowequation.element_equation[numpy.nonzero(SSAflag)]=2
+    md.flowequation.element_equation[numpy.nonzero(L1L2flag)]=3
+    md.flowequation.element_equation[numpy.nonzero(HOflag)]=4
+    md.flowequation.element_equation[numpy.nonzero(FSflag)]=5
+    md.flowequation.element_equation[numpy.nonzero(SSAHOflag)]=6
+    md.flowequation.element_equation[numpy.nonzero(SSAFSflag)]=7
+    md.flowequation.element_equation[numpy.nonzero(HOFSflag)]=8
+
+    #border
+    md.flowequation.borderHO=nodeonHO
+    md.flowequation.borderSSA=nodeonSSA
+    md.flowequation.borderFS=nodeonFS
+
+    #Create vertices_type
+    md.flowequation.vertex_equation=numpy.zeros(md.mesh.numberofvertices,int)
+    pos=numpy.nonzero(nodeonSSA)
+    md.flowequation.vertex_equation[pos]=2
+    pos=numpy.nonzero(nodeonL1L2)
+    md.flowequation.vertex_equation[pos]=3
+    pos=numpy.nonzero(nodeonHO)
+    md.flowequation.vertex_equation[pos]=4
+    pos=numpy.nonzero(nodeonFS)
+    md.flowequation.vertex_equation[pos]=5
+    #DO SIA LAST! Otherwise spcs might not be set up correctly (SIA should have priority)
+    pos=numpy.nonzero(nodeonSIA)
+    md.flowequation.vertex_equation[pos]=1
+    if any(FSflag):
+        pos=numpy.nonzero(numpy.logical_not(nodeonFS))
+        if not (any(HOflag) or any(SSAflag)):
+            md.flowequation.vertex_equation[pos]=0
+    pos=numpy.nonzero(nodeonSSAHO)
+    md.flowequation.vertex_equation[pos]=6
+    pos=numpy.nonzero(nodeonHOFS)
+    md.flowequation.vertex_equation[pos]=7
+    pos=numpy.nonzero(nodeonSSAFS)
+    md.flowequation.vertex_equation[pos]=8
+
+    #figure out solution types
+    md.flowequation.isSIA=any(md.flowequation.element_equation==1)
+    md.flowequation.isSSA=any(md.flowequation.element_equation==2)
+    md.flowequation.isL1L2=any(md.flowequation.element_equation==3)
+    md.flowequation.isHO=any(md.flowequation.element_equation==4)
+    md.flowequation.isFS=any(md.flowequation.element_equation==5)
+
+    #Check that tiling can work (done before returning, so the checks actually run):
+    if any(md.flowequation.borderSSA) and any(md.flowequation.borderHO) and any(md.flowequation.borderHO + md.flowequation.borderSSA !=1):
+        raise TypeError("error coupling domain too irregular")
+    if any(md.flowequation.borderSSA) and any(md.flowequation.borderFS) and any(md.flowequation.borderFS + md.flowequation.borderSSA !=1):
+        raise TypeError("error coupling domain too irregular")
+    if any(md.flowequation.borderFS) and any(md.flowequation.borderHO) and any(md.flowequation.borderHO + md.flowequation.borderFS !=1):
+        raise TypeError("error coupling domain too irregular")
+
+    return md
Index: ../trunk-jpl/src/py3/parameterization/sethydrostaticmask.py
===================================================================
--- ../trunk-jpl/src/py3/parameterization/sethydrostaticmask.py	(revision 0)
+++ ../trunk-jpl/src/py3/parameterization/sethydrostaticmask.py	(revision 19895)
@@ -0,0 +1,34 @@
+import numpy
+import os
+from model import model
+from FlagElements import FlagElements
+from ContourToMesh import ContourToMesh
+
+def sethydrostaticmask(md):
+    """
+    SETHYDROSTATICMASK - establish groundedice_levelset field
+
+    Determines grounded and floating ice position based on
+    md.geometry.bed and md.geometry.thickness
+
+    Usage:
+        md=sethydrostaticmask(md)
+
+    Examples:
+        md=sethydrostaticmask(md)
+    """
+
+    if numpy.size(md.geometry.bed,axis=0)!=md.mesh.numberofvertices or numpy.size(md.geometry.base,axis=0)!=md.mesh.numberofvertices or numpy.size(md.geometry.thickness,axis=0)!=md.mesh.numberofvertices:
+        raise IOError("hydrostaticmask error message: fields in md.geometry do not have the right size.")
+
+    # grounded ice level set
+    md.mask.groundedice_levelset=md.geometry.thickness+md.geometry.bed*md.materials.rho_water/md.materials.rho_ice
+
+    #Check consistency of geometry
+    if any(md.geometry.base[numpy.nonzero(md.mask.groundedice_levelset>0.)]!=md.geometry.bed[numpy.nonzero(md.mask.groundedice_levelset>0.)]):
+        print("WARNING: md.geometry.bed and md.geometry.base not equal on grounded ice")
+
+    if any(md.geometry.base[numpy.nonzero(md.mask.groundedice_levelset<=0.)]<md.geometry.bed[numpy.nonzero(md.mask.groundedice_levelset<=0.)]):
+        print("WARNING: md.geometry.base is below md.geometry.bed on floating ice")
+
+    return md
Index: ../trunk-jpl/src/py3/parameterization/contourenvelope.py
===================================================================
--- ../trunk-jpl/src/py3/parameterization/contourenvelope.py	(revision 0)
+++ ../trunk-jpl/src/py3/parameterization/contourenvelope.py	(revision 19895)
+import numpy
+import copy
+import os
+from NodeConnectivity import NodeConnectivity
+from ElementConnectivity import ElementConnectivity
+from ContourToMesh import ContourToMesh
+import MatlabFuncs as m
+
+def contourenvelope(md,*args):
+    """
+    CONTOURENVELOPE - build a set of segments enveloping a contour .exp
+
+    Usage:
+        segments=contourenvelope(md,*args)
+    """
+
+    #some checks
+    if len(args)>1:
+        raise RuntimeError("contourenvelope error message: bad usage")
+
+    if len(args)==1:
+        flags=args[0]
+
+        if isinstance(flags,str):
+            file=flags
+            if not os.path.exists(file):
+                raise IOError("contourenvelope error message: file '%s' not found" % file)
+            isfile=1
+        elif isinstance(flags,(bool,int,float)):
+            #do nothing for now
+            isfile=0
+        else:
+            raise TypeError("contourenvelope error message: second argument should be a file or an elements flag")
+
+    #Now, build the connectivity tables for this mesh.
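+    # The envelope is found by zeroing the connectivity of elements outside the
+    # contour and keeping element edges whose neighbour disappears on one side.
+    # Tiny sketch of the flagging step below (illustrative values, not ISSM data):
+    #    flagged=numpy.array([1.,0.,1.])                        # 1 = element inside contour
+    #    print(numpy.nonzero(numpy.logical_not(flagged))[0])    # -> [1]: element to disconnect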
+    #Computing connectivity
+    if numpy.size(md.mesh.vertexconnectivity,axis=0)!=md.mesh.numberofvertices and numpy.size(md.mesh.vertexconnectivity,axis=0)!=md.mesh.numberofvertices2d:
+        [md.mesh.vertexconnectivity]=NodeConnectivity(md.mesh.elements,md.mesh.numberofvertices)
+    if numpy.size(md.mesh.elementconnectivity,axis=0)!=md.mesh.numberofelements and numpy.size(md.mesh.elementconnectivity,axis=0)!=md.mesh.numberofelements2d:
+        [md.mesh.elementconnectivity]=ElementConnectivity(md.mesh.elements,md.mesh.vertexconnectivity)
+
+    #get nodes inside profile
+    elementconnectivity=copy.deepcopy(md.mesh.elementconnectivity)
+    if md.mesh.dimension()==2:
+        elements=copy.deepcopy(md.mesh.elements)
+        x=copy.deepcopy(md.mesh.x)
+        y=copy.deepcopy(md.mesh.y)
+        numberofvertices=copy.deepcopy(md.mesh.numberofvertices)
+        numberofelements=copy.deepcopy(md.mesh.numberofelements)
+    else:
+        elements=copy.deepcopy(md.mesh.elements2d)
+        x=copy.deepcopy(md.mesh.x2d)
+        y=copy.deepcopy(md.mesh.y2d)
+        numberofvertices=copy.deepcopy(md.mesh.numberofvertices2d)
+        numberofelements=copy.deepcopy(md.mesh.numberofelements2d)
+
+    if len(args)==1:
+
+        if isfile:
+            #get flag list of elements and nodes inside the contour
+            nodein=ContourToMesh(elements,x,y,file,'node',1)
+            elemin=(numpy.sum(nodein[elements-1],axis=1)==numpy.size(elements,axis=1))
+            #modify element connectivity
+            elemout=numpy.nonzero(numpy.logical_not(elemin))[0]
+            elementconnectivity[elemout,:]=0
+            elementconnectivity[numpy.nonzero(m.ismember(elementconnectivity,elemout+1))]=0
+        else:
+            #get flag list of elements and nodes inside the contour
+            nodein=numpy.zeros(numberofvertices)
+            elemin=numpy.zeros(numberofelements)
+
+            pos=numpy.nonzero(flags)
+            elemin[pos]=1
+            nodein[elements[pos,:]-1]=1
+
+            #modify element connectivity
+            elemout=numpy.nonzero(numpy.logical_not(elemin))[0]
+            elementconnectivity[elemout,:]=0
+            elementconnectivity[numpy.nonzero(m.ismember(elementconnectivity,elemout+1))]=0
+
+    #Find element on boundary
+    #First: find elements on the boundary of the domain
+    flag=copy.deepcopy(elementconnectivity)
+    if len(args)==1:
+        flag[numpy.nonzero(flag)]=elemin[flag[numpy.nonzero(flag)]]
+    elementonboundary=numpy.logical_and(numpy.prod(flag,axis=1)==0,numpy.sum(flag,axis=1)>0)
+
+    #Find segments on boundary
+    pos=numpy.nonzero(elementonboundary)[0]
+    num_segments=numpy.size(pos)
+    segments=numpy.zeros((num_segments*3,3),int)
+    count=0
+
+    for el1 in pos:
+        els2=elementconnectivity[el1,numpy.nonzero(elementconnectivity[el1,:])[0]]-1
+        if numpy.size(els2)>1:
+            flag=numpy.intersect1d(numpy.intersect1d(elements[els2[0],:],elements[els2[1],:]),elements[el1,:])
+            nods1=elements[el1,:]
+            nods1=numpy.delete(nods1,numpy.nonzero(nods1==flag))
+            segments[count,:]=[nods1[0],nods1[1],el1+1]
+
+            ord1=numpy.nonzero(nods1[0]==elements[el1,:])[0][0]
+            ord2=numpy.nonzero(nods1[1]==elements[el1,:])[0][0]
+
+            #swap segment nodes if necessary (flipud does the swap in one step)
+            if ( (ord1==0 and ord2==1) or (ord1==1 and ord2==2) or (ord1==2 and ord2==0) ):
+                segments[count,0:2]=numpy.flipud(segments[count,0:2])
+            count+=1
+        else:
+            nods1=elements[el1,:]
+            flag=numpy.setdiff1d(nods1,elements[els2,:])
+            for j in range(0,3):
+                nods=numpy.delete(nods1,j)
+                if numpy.any(m.ismember(flag,nods)):
+                    segments[count,:]=[nods[0],nods[1],el1+1]
+                    ord1=numpy.nonzero(nods[0]==elements[el1,:])[0][0]
+                    ord2=numpy.nonzero(nods[1]==elements[el1,:])[0][0]
+                    #swap segment nodes if necessary (flipud does the swap in one step)
+                    if ( (ord1==0 and ord2==1) or (ord1==1 and ord2==2) or (ord1==2 and ord2==0) ):
+                        segments[count,0:2]=numpy.flipud(segments[count,0:2])
+                    count+=1
+    segments=segments[0:count,:]
+
+    return segments
Index: ../trunk-jpl/src/py3/parameterization/setmask.py
===================================================================
--- ../trunk-jpl/src/py3/parameterization/setmask.py	(revision 0)
+++ ../trunk-jpl/src/py3/parameterization/setmask.py	(revision 19895)
@@ -0,0 +1,68 @@
+import numpy
+import os
+from model import model
+from FlagElements import FlagElements
+from pairoptions import pairoptions
+from ContourToMesh import ContourToMesh
+
+def setmask(md, floatingicename, groundedicename, **kwargs):
+    """
+    SETMASK - establish boundaries between grounded and floating ice.
+
+    By default, ice is considered grounded.  The contour floatingicename defines nodes
+    for which ice is floating.  The contour groundedicename defines nodes inside a floatingice,
+    that are grounded (ie: ice rises, islands, etc ...)
+    All input files are in the Argus format (extension .exp).
+
+    Usage:
+        md=setmask(md,floatingicename,groundedicename)
+
+    Examples:
+        md=setmask(md,'all','')
+        md=setmask(md,'Iceshelves.exp','Islands.exp')
+    """
+    #some checks on list of arguments
+    if not isinstance(md,model):
+        raise TypeError("setmask error message")
+
+    #process options
+    options=pairoptions(**kwargs)
+
+    #Get assigned fields
+    x        = md.mesh.x
+    y        = md.mesh.y
+    elements = md.mesh.elements
+
+    #Assign elementonfloatingice, elementongroundedice, vertexongroundedice and vertexonfloatingice.  Only change at your own peril!  This is synchronized heavily with the GroundingLineMigration module. {{{
+    elementonfloatingice = FlagElements(md, floatingicename)
+    elementongroundedice = FlagElements(md, groundedicename)
+
+    #Because groundedice nodes and elements can be included into a floatingice, we need to update.  Remember, all the previous
+    #arrays come from domain outlines that can intersect one another:
+
+    elementonfloatingice = numpy.logical_and(elementonfloatingice,numpy.logical_not(elementongroundedice))
+    elementongroundedice = numpy.logical_not(elementonfloatingice)
+
+    #the order here is important.  we choose vertexongroundedice as default on the grounding line.
+    vertexonfloatingice = numpy.zeros(md.mesh.numberofvertices,'bool')
+    vertexongroundedice = numpy.zeros(md.mesh.numberofvertices,'bool')
+    vertexongroundedice[md.mesh.elements[numpy.nonzero(elementongroundedice),:]-1]=True
+    vertexonfloatingice[numpy.nonzero(numpy.logical_not(vertexongroundedice))]=True
+    #}}}
+
+    #level sets
+    md.mask.groundedice_levelset = -1.*numpy.ones(md.mesh.numberofvertices)
+    md.mask.groundedice_levelset[md.mesh.elements[numpy.nonzero(elementongroundedice),:]-1]=1.
+
+    if len(kwargs):
+        md.mask.ice_levelset = 1.*numpy.ones(md.mesh.numberofvertices)
+        icedomainfile = options.getfieldvalue('icedomain','none')
+        if not os.path.exists(icedomainfile):
+            raise IOError("setmask error message: ice domain file '%s' not found." % icedomainfile)
+        #use contourtomesh to set ice values inside ice domain
+        [vertexinsideicedomain,elementinsideicedomain]=ContourToMesh(elements,x,y,icedomainfile,'node',1)
+        md.mask.ice_levelset[numpy.nonzero(vertexinsideicedomain)[0]] = -1.
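+        # Convention sketch: md.mask.ice_levelset is -1 where ice is present and
+        # +1 elsewhere, so vertices flagged inside the ice domain contour are set
+        # to -1 above.  Hypothetical call with the optional icedomain kwarg:
+        #    md=setmask(md,'Iceshelves.exp','Islands.exp',icedomain='IceDomain.exp')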
+ else: + md.mask.ice_levelset = -1.*numpy.ones(md.mesh.numberofvertices) + + return md Index: ../trunk-jpl/src/py3/plot/checkplotoptions.py =================================================================== --- ../trunk-jpl/src/py3/plot/checkplotoptions.py (revision 0) +++ ../trunk-jpl/src/py3/plot/checkplotoptions.py (revision 19895) @@ -0,0 +1,168 @@ +import numpy as npy + +def checkplotoptions(md,options): + ''' + CHECKPLOTOPTIONS - build a structure that holds all plot options + + Usage: + options=checkplotoptions(md,options) + + See also: PLOTMODEL + + NOTE: not fully implemented yet + ''' + + + #units + if options.exist('unit'): + if 'km' in options.getfieldvalue('unit','km'): + options.changefieldvalue('unit',10**-3) + elif '100km' in options.getfieldvalue('unit','100km'): + options.changefieldvalue('unit',10**-5) + + #density + if options.exist('density'): + density=options.getfieldvalue('density') + options.changefieldvalue('density',abs(ceil(density))) + + #show section + if options.exist('showsection'): + if 'on' in options.getfieldvalue('showsection','on'): + options.changefieldvalue('showsection',4) + + #smooth values + if options.exist('smooth'): + if 'on' in options.getfieldvalue('smooth','on'): + options.changefieldvalue('smooth',0) + + #contouronly values + if options.exist('contouronly'): + if 'on' in options.getfieldvalue('contouronly','on'): + options.changefieldvalue('contouronly',1) + + #colorbar + if options.exist('colorbar'): + if 'on' in options.getfieldvalue('colorbar','on'): + options.changefieldvalue('colorbar',1) + elif 'off' in options.getfieldvalue('colorbar','off'): + options.changefieldvalue('colorbar',0) + + #text + if options.exist('text'): + + # text values (coerce to list for consistent functionality) + textlist=[] + text=options.getfieldvalue('text','default text') + textlist.extend([text] if isinstance(text,str) else text) + numtext=len(textlist) + + # text position + textpos=options.getfieldvalue('textposition',[0.5,0.5]) + if not isinstance(textpos,list): + raise Exception('textposition should be passed as a list') + if any(isinstance(i,list) for i in textpos): + textx=[item[0] for item in textpos] + texty=[item[1] for item in textpos] + else: + textx=[textpos[0]] + texty=[textpos[1]] + if len(textx)!=numtext or len(texty)!=numtext: + raise Exception('textposition should contain one list of x,y vertices for every text instance') + + # font size + if options.exist('textfontsize'): + textfontsize=options.getfieldvalue('textfontsize',12) + sizelist=[] + sizelist.extend(textsize if isinstance(textfontsize,list) else [textfontsize]) + else: + sizelist=[12] + if len(sizelist)==1: + sizelist=npy.tile(sizelist,numtext) + + # font color + if options.exist('textcolor'): + textcolor=options.getfieldvalue('textcolor','k') + colorlist=[] + colorlist.extend(textcolor if isinstance(textcolor,list) else [textcolor]) + else: + colorlist=['k'] + if len(colorlist)==1: + colorlist=npy.tile(colorlist,numtext) + + # textweight + if options.exist('textweight'): + textweight=options.getfieldvalue('textweight') + weightlist=[] + weightlist.extend(textweight if isinstance(textweight,list) else [textweight]) + else: + weightlist=['normal'] + if len(weightlist)==1: + weightlist=npy.tile(weightlist,numtext) + + # text rotation + if options.exist('textrotation'): + textrotation=options.getfieldvalue('textrotation',0) + rotationlist=[] + rotationlist.extend(textrotation if isinstance(textrotation,list) else [textrotation]) + else: + rotationlist=[0] + if len(rotationlist)==1: + 
+
+        options.changefieldvalue('text',textlist)
+        options.addfield('textx',textx)
+        options.addfield('texty',texty)
+        options.changefieldvalue('textfontsize',sizelist)
+        options.changefieldvalue('textcolor',colorlist)
+        options.changefieldvalue('textweight',weightlist)
+        options.changefieldvalue('textrotation',rotationlist)
+
+    #expdisp
+    expdispvaluesarray=[]
+    expstylevaluesarray=[]
+    expstylevalues=[]
+    if options.exist('expstyle'):
+        expstylevalues=options.getfieldvalue('expstyle')
+        if type(expstylevalues)==str:
+            expstylevalues=[expstylevalues]
+    if options.exist('expdisp'):
+        expdispvalues=options.getfieldvalue('expdisp')
+        if type(expdispvalues)==str:
+            expdispvalues=[expdispvalues]
+        for i in npy.arange(len(expdispvalues)):
+            expdispvaluesarray.append(expdispvalues[i])
+            if len(expstylevalues)>i:
+                expstylevaluesarray.append(expstylevalues[i])
+            else:
+                expstylevaluesarray.append('-k')
+
+    options.changefieldvalue('expstyle',expstylevaluesarray)
+    options.changefieldvalue('expdisp',expdispvaluesarray)
+
+    #latlonnumbering
+    if options.exist('latlonclick'):
+        if 'on' in options.getfieldvalue('latlonclick','on'):
+            options.changefieldvalue('latlonclick',1)
+
+    #northarrow
+    if options.exist('northarrow'):
+        if 'on' in options.getfieldvalue('northarrow','on'):
+            #default values
+            Lx=max(md.mesh.x)-min(md.mesh.x)
+            Ly=max(md.mesh.y)-min(md.mesh.y)
+            options.changefieldvalue('northarrow',[min(md.mesh.x)+1./6.*Lx, min(md.mesh.y)+5./6.*Ly, 1./15.*Ly, 0.25, 1./250.*Ly])
+
+    #scale ruler
+    if options.exist('scaleruler'):
+        if 'on' in options.getfieldvalue('scaleruler','on'):
+            Lx=max(md.mesh.x)-min(md.mesh.x)
+            Ly=max(md.mesh.y)-min(md.mesh.y)
+            options.changefieldvalue('scaleruler',[min(md.mesh.x)+6./8.*Lx, min(md.mesh.y)+1./10.*Ly, 10**(npy.ceil(npy.log10(Lx)))/5, npy.floor(Lx/100), 5])
+
+    #log scale
+    if options.exist('log'):
+        if options.exist('clim'):
+            options.changefieldvalue('clim',npy.log(options.getfieldvalue('clim'))/npy.log(options.getfieldvalue('log')))
+        options.changefieldvalue('cutoff',npy.log(options.getfieldvalue('cutoff',1.5))/npy.log(options.getfieldvalue('log')))
+
+    return options
Index: ../trunk-jpl/src/py3/plot/colormaps/cmaptools.py
===================================================================
--- ../trunk-jpl/src/py3/plot/colormaps/cmaptools.py (revision 0)
+++ ../trunk-jpl/src/py3/plot/colormaps/cmaptools.py (revision 19895)
@@ -0,0 +1,25 @@
+import numpy as npy
+
+try:
+    import matplotlib as mpl
+except ImportError:
+    print('cannot import matplotlib, no plotting capabilities enabled')
+
+def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
+    '''
+    truncate a colormap within normalized limits [0,1]
+
+    cmap   - a matplotlib colormap
+    minval - minimum value, normalized, of cmap to be returned.
+    maxval - maximum value, normalized, of cmap to be returned.
+    n      - number of levels to use in constructing the new colormap
+
+    Example:
+        newcmap=truncate_colormap(oldcmap,minval=0.2,maxval=0.8,n=128)
+    '''
+
+    new_cmap = mpl.colors.LinearSegmentedColormap.from_list('trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name,
+        a=minval, b=maxval), cmap(npy.linspace(minval, maxval, n)))
+
+    return new_cmap
Index: ../trunk-jpl/src/py3/plot/plot_manager.py
===================================================================
--- ../trunk-jpl/src/py3/plot/plot_manager.py (revision 0)
+++ ../trunk-jpl/src/py3/plot/plot_manager.py (revision 19895)
@@ -0,0 +1,102 @@
+try:
+    import pylab as p
+    import matplotlib.pyplot as plt
+except ImportError:
+    print("could not import pylab, matplotlib has not been installed, no plotting capabilities enabled")
+
+from checkplotoptions import checkplotoptions
+from plot_mesh import plot_mesh
+from processmesh import processmesh
+from processdata import processdata
+from plot_unit import plot_unit
+from applyoptions import applyoptions
+
+try:
+    from osgeo import gdal
+    overlaysupport=True
+except ImportError:
+    print('osgeo/gdal for python not installed, overlay plots are not enabled')
+    overlaysupport=False
+
+if overlaysupport:
+    from plot_overlay import plot_overlay
+
+def plot_manager(md,options,fig,ax):
+    '''
+    PLOT_MANAGER - distribute the plots called by plotmodel
+
+    'fig' is a handle to the figure instance created by plotmodel.
+
+    'ax' is a handle to the axes instance created by plotmodel. This is
+    currently generated using matplotlib's AxesGrid toolkit.
+
+    Usage:
+        plot_manager(md,options,fig,ax)
+
+    See also: PLOTMODEL, PLOT_UNIT
+    '''
+
+    #parse options and get a structure of options
+    options=checkplotoptions(md,options)
+
+    #get data to be plotted
+    data=options.getfieldvalue('data')
+
+    #initialize plot handle variable
+    #handle=None
+
+    # initialize subplot
+    #p.subplot(nrows,ncols,i,aspect='equal')
+
+    ##basemap plot
+    #if options.exist('basemap'):
+    #    plot_basemap(md,data,options,nrows,ncols,i)
+
+    #overlay plot
+    if options.exist('overlay') and overlaysupport:
+        plot_overlay(md,data,options,ax)
+        options.addfielddefault('alpha',0.5)
+        options.addfielddefault('xlim',[min(md.mesh.x),max(md.mesh.x)])
+        options.addfielddefault('ylim',[min(md.mesh.y),max(md.mesh.y)])
+
+    #figure out if this is a special plot
+    if isinstance(data,str):
+
+        # convert string to lower case for a case-insensitive comparison
+        if data.lower()=='mesh':
+            plot_mesh(md,options,ax)
+            applyoptions(md,[],options,fig,ax)
+            fig.delaxes(fig.axes[1]) # hack to remove colorbar after the fact
+            return
+        elif data.lower()=='none':
+            print('no data provided to plot (TODO: write plot_none.py)')
+            applyoptions(md,[],options,fig,ax)
+            return
+        else:
+            print("WARNING: '%s' is not implemented or is not a valid string for option 'data'" % data)
+
+    #elif data in vars(md):
+    #else:
+    #    print("'data' not a string, plotting model properties yet to be implemented...")
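+    # (hypothetical sketch of the unimplemented branch: a string naming a model field,
+    #  e.g. 'geometry.thickness', could be resolved against md here and then fall
+    #  through to the standard plot path below)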
+
+    #Gridded plot
+
+    #Section plot
+
+    #Profile plot
+
+    #process data and model
+    x,y,z,elements,is2d,isplanet=processmesh(md,data,options)
+    data2,datatype=processdata(md,data,options)
+
+    #standard plot
+    #if not handle:
+    #    p.subplot(nrows,ncols,i,aspect='equal')
+
+    #plot unit
+    plot_unit(x,y,z,elements,data2,is2d,isplanet,datatype,options,ax)
+
+    #apply all options
+    applyoptions(md,data2,options,fig,ax)
+
+    #ground overlay on kml plot_unit
Index: ../trunk-jpl/src/py3/plot/processdata.py
===================================================================
--- ../trunk-jpl/src/py3/plot/processdata.py (revision 0)
+++ ../trunk-jpl/src/py3/plot/processdata.py (revision 19895)
@@ -0,0 +1,108 @@
+import numpy as npy
+
+def processdata(md,data,options):
+    """
+    PROCESSDATA - process data to be plotted
+
+    datatype = 1 -> elements
+    datatype = 2 -> nodes
+    datatype = 3 -> node quivers
+    datatype = 4 -> patch
+
+    Usage:
+        data,datatype=processdata(md,data,options)
+
+    See also: PLOTMODEL, PROCESSMESH
+    """
+
+    #check format (numpy's isnan handles arrays; math.isnan does not)
+    if (len(data)==0 or (len(data)==1 and not isinstance(data,dict) and npy.isnan(data).all())):
+        raise ValueError("processdata error message: 'data' provided is empty")
+
+    #needed later on
+    if 'numberofvertices2d' in dir(md.mesh):
+        numberofvertices2d=md.mesh.numberofvertices2d
+        numberofelements2d=md.mesh.numberofelements2d
+    else:
+        numberofvertices2d=npy.nan
+        numberofelements2d=npy.nan
+
+    procdata=npy.copy(data)
+
+    #process patch
+
+    #initialize datatype
+    datatype=0
+
+    #get datasize
+    if npy.ndim(procdata)==1:
+        datasize=npy.array([len(procdata),1])
+    else:
+        datasize=npy.shape(procdata)
+        if len(datasize)>2:
+            raise ValueError('data passed to plotmodel has more than 2 dimensions; check that column vectors are rank-1')
+
+    #process NaN's if any
+    nanfill=options.getfieldvalue('nan',-9999)
+    if npy.any(npy.isnan(procdata)):
+        lb=npy.min(data[~npy.isnan(data)])
+        ub=npy.max(data[~npy.isnan(data)])
+        if lb==ub:
+            lb=lb-0.5
+            ub=ub+0.5
+            nanfill=lb-1
+        procdata[npy.isnan(procdata)]=nanfill
+        options.addfielddefault('clim',[lb,ub])
+        options.addfielddefault('cmap_set_under','1')
+        print("WARNING: nan's treated as",nanfill,"by default. Change using pairoption 'nan',nan_fill_value in plotmodel call")
+
+    #quiver plot
+    if datasize[1]>1 and datasize[0]!=md.mesh.numberofvertices+1:
+        if datasize[0]==md.mesh.numberofvertices and datasize[1]==2:
+            datatype=3
+        else:
+            raise ValueError('plotmodel error message: data should have two columns of length md.mesh.numberofvertices for a quiver plot')
+
+    #non-patch processing
+
+    #element data
+    if datasize[0]==md.mesh.numberofelements and datasize[1]==1:
+
+        #initialize datatype if non patch
+        if datatype!=4 and datatype!=5:
+            datatype=1
+
+        #mask?
+
+        #log?
+
+    #node data
+    if datasize[0]==md.mesh.numberofvertices and datasize[1]==1:
+        datatype=2
+
+    #spc time series?
+    if datasize[0]==md.mesh.numberofvertices+1:
+        datatype=2
+        spccol=options.getfieldvalue('spccol',0)
+        print('multiple-column spc field; specify column to plot using option "spccol"')
+        print('column',spccol,'plotted for time:',procdata[-1,spccol])
+        procdata=procdata[0:-1,spccol]
+
+    #mask?
+
+    #log?
+
+    #layer projection?
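+    # (a hypothetical 'layer' option would slice an extruded 3D field to a single
+    #  layer here, e.g. procdata=procdata[(layer-1)*numberofvertices2d:layer*numberofvertices2d])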
+
+    #control arrow density if quiver plot
+
+    #convert rank-2 array to rank-1
+    if npy.ndim(procdata)==2 and npy.shape(procdata)[1]==1:
+        procdata=procdata.reshape(-1,)
+
+    #if datatype is still zero, error out
+    if datatype==0:
+        raise ValueError("processdata error: data provided not recognized or not supported")
+    else:
+        return procdata,datatype
Index: ../trunk-jpl/src/py3/plot/plot_mesh.py
===================================================================
--- ../trunk-jpl/src/py3/plot/plot_mesh.py (revision 0)
+++ ../trunk-jpl/src/py3/plot/plot_mesh.py (revision 19895)
@@ -0,0 +1,29 @@
+try:
+    import pylab as p
+except ImportError:
+    print("could not import pylab, matplotlib has not been installed, no plotting capabilities enabled")
+
+from processmesh import processmesh
+from applyoptions import applyoptions
+
+def plot_mesh(md,options,ax):
+    '''
+    PLOT_MESH - plot model mesh
+
+    Usage:
+        plot_mesh(md,options,ax)
+
+    See also: PLOTMODEL
+    '''
+
+    x,y,z,elements,is2d,isplanet=processmesh(md,[],options)
+
+    if is2d:
+        ax.triplot(x,y,elements)
+    else:
+        print('WARNING: only 2D mesh plot is currently implemented')
+
+    #apply options
+    options.addfielddefault('title','Mesh')
+    options.addfielddefault('colorbar','off')
+    options.addfielddefault('ticklabels','on')
Index: ../trunk-jpl/src/py3/plot/processmesh.py
===================================================================
--- ../trunk-jpl/src/py3/plot/processmesh.py (revision 0)
+++ ../trunk-jpl/src/py3/plot/processmesh.py (revision 19895)
@@ -0,0 +1,83 @@
+from math import isnan
+import MatlabFuncs as m
+import numpy as npy
+
+def processmesh(md,data,options):
+    """
+    PROCESSMESH - process the mesh for plotting
+
+    Usage:
+        x,y,z,elements,is2d,isplanet=processmesh(md,data,options)
+
+    See also: PLOTMODEL, PROCESSDATA
+    """
+
+    #some checks
+    if md.mesh.numberofvertices==0:
+        raise ValueError('processmesh error: mesh is empty')
+    if md.mesh.numberofvertices==md.mesh.numberofelements:
+        raise ValueError('processmesh error: the number of elements is the same as the number of nodes')
+
+    if len(data)==0 or not isinstance(data,dict):
+
+        if 'latlon' not in options.getfieldvalue('coord','xy').lower(): #convert to lower case for comparison
+            x=md.mesh.x
+            if 'x2d' in dir(md.mesh): x2d=md.mesh.x2d
+            y=md.mesh.y
+            if 'y2d' in dir(md.mesh): y2d=md.mesh.y2d
+        else:
+            x=md.mesh.long
+            y=md.mesh.lat
+
+        if 'z' in dir(md.mesh):
+            z=md.mesh.z
+        else:
+            z=npy.zeros_like(md.mesh.x)
+
+        if 'elements2d' in dir(md.mesh):
+            elements2d=md.mesh.elements2d
+            elements2d=elements2d-1 # subtract one since python indexes from zero
+        elements=md.mesh.elements
+        elements=elements-1
+
+        #is it a 2D plot?
+        if md.mesh.dimension()==2:
+            is2d=1
+        else:
+            if options.getfieldvalue('layer',0)>=1:
+                is2d=1
+            else:
+                is2d=0
+
+        #layer projection?
+        if options.getfieldvalue('layer',0)>=1:
+            if 'latlon' in options.getfieldvalue('coord','xy').lower():
+                raise ValueError('processmesh error: cannot work with 3D mesh in lat-lon coords')
+            #we modify the mesh temporarily to a 2D mesh from which the 3D mesh was extruded
+            x=x2d
+            y=y2d
+            z=npy.zeros_like(x2d)
+            elements=elements2d
+
+    else:
+        #Process mesh for plotting
+        if md.mesh.dimension()==2:
+            is2d=1
+        else:
+            # process polycollection here for 3D plot
+            is2d=0
+
+    #units
+    if options.exist('unit'):
+        unit=options.getfieldvalue('unit')
+        x=x*unit
+        y=y*unit
+        z=z*unit
+
+    #is model a member of planet class? (workaround until planet class defined)
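+    # (assumption encoded here: any md object whose class is not 'model' is treated as a planet)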
+    if md.__class__.__name__!='model':
+        isplanet=1
+    else:
+        isplanet=0
+
+    return x,y,z,elements,is2d,isplanet
Index: ../trunk-jpl/src/py3/plot/writejsfield.py
===================================================================
--- ../trunk-jpl/src/py3/plot/writejsfield.py (revision 0)
+++ ../trunk-jpl/src/py3/plot/writejsfield.py (revision 19895)
@@ -0,0 +1,26 @@
+import numpy
+
+def writejsfield(fid,name,variable,nods):
+#WRITEJSFIELD - write variable to javascript file
+#
+# Usage:
+#    writejsfield(fid,name,variable,nods)
+#
+    #write array:
+    #if not isinstance(variable, list):
+    if type(variable[0])==numpy.float64:
+        fid.write('//{0}\n'.format(name)) # header comment naming the field (the original format string dropped 'name')
+        fid.write('{0}=['.format(name))
+        for i in range(0, nods-1):
+            fid.write('{0},'.format(variable[i]))
+        fid.write('{0}];\n'.format(variable[-1]))
+        fid.write('\n')
+    else:
+        #multi-sized array, one entry per column (use row 0 to count columns):
+        fid.write('//{0}\n'.format(name))
+        fid.write('{0}=[]\n'.format(name))
+        for i in range(0, len(variable[0])):
+            fid.write('{0}["{1}"]=['.format(name,i))
+            for j in range(0, nods-1): # start at 0 so the first node is not skipped
+                fid.write('{0},'.format(variable[j][i]))
+            fid.write('{0}];\n'.format(variable[-1][i]))
+        fid.write('\n')
Index: ../trunk-jpl/src/py3/plot/plot_unit.py
===================================================================
--- ../trunk-jpl/src/py3/plot/plot_unit.py (revision 0)
+++ ../trunk-jpl/src/py3/plot/plot_unit.py (revision 19895)
@@ -0,0 +1,92 @@
+from cmaptools import truncate_colormap
+try:
+    import pylab as p
+    import matplotlib as mpl
+    import matplotlib.pyplot as plt
+    import numpy as npy
+except ImportError:
+    print("could not import pylab, matplotlib has not been installed, no plotting capabilities enabled")
+
+def plot_unit(x,y,z,elements,data,is2d,isplanet,datatype,options,ax):
+    """
+    PLOT_UNIT - unit plot, display data
+
+    Usage:
+        plot_unit(x,y,z,elements,data,is2d,isplanet,datatype,options,ax)
+
+    See also: PLOTMODEL, PLOT_MANAGER
+    """
+
+    #edgecolor
+    edgecolor=options.getfieldvalue('edgecolor','None')
+
+    #number of colorlevels for plots
+    colorlevels=options.getfieldvalue('colorlevels',128)
+
+    alpha=options.getfieldvalue('alpha',1)
+
+    #colormap
+    # default sequential colormap
+    defaultmap=truncate_colormap(mpl.cm.gnuplot2,0.1,0.9,128)
+    cmap=options.getfieldvalue('colormap',defaultmap)
+    if options.exist('cmap_set_over'):
+        over=options.getfieldvalue('cmap_set_over','0.5')
+        cmap.set_over(over)
+    if options.exist('cmap_set_under'):
+        under=options.getfieldvalue('cmap_set_under','0.5')
+        cmap.set_under(under)
+
+    #normalize colormap if clim/caxis specified
+    if options.exist('clim'):
+        lims=options.getfieldvalue('clim',[npy.amin(data),npy.amax(data)])
+    elif options.exist('caxis'):
+        lims=options.getfieldvalue('caxis',[npy.amin(data),npy.amax(data)])
+    else:
+        if npy.amin(data)==npy.amax(data):
+            lims=[npy.amin(data)-0.5,npy.amax(data)+0.5]
+        else:
+            lims=[npy.amin(data),npy.amax(data)]
+    norm=mpl.colors.Normalize(vmin=lims[0],vmax=lims[1])
+
+    if datatype==1:
+        #element plot
+        if is2d:
+            tri=ax.tripcolor(x,y,elements,data,colorlevels,cmap=cmap,norm=norm,alpha=alpha,edgecolors=edgecolor)
+        else:
+            raise ValueError('plot_unit error: 3D element plot not supported yet')
+        return
+
+    elif datatype==2:
+        #node plot
+        if is2d:
+            tri=ax.tricontourf(x,y,elements,data,colorlevels,cmap=cmap,norm=norm,alpha=alpha,extend='both')
+            if edgecolor!='None':
+                ax.triplot(x,y,elements,color=edgecolor)
+        else:
+            raise ValueError('plot_unit error: 3D node plot not supported yet')
+        return
+
+    elif datatype==3:
+        #quiver (vector) plot
+        vx=data[:,0]
+        vy=data[:,1]
+        #TODO: move this into a dedicated plot_quiver.py
+        color=npy.sqrt(vx**2+vy**2)
+        scale=options.getfieldvalue('scale',1000)
+        width=options.getfieldvalue('width',0.005*(npy.amax(x)-npy.amin(x)))
+        headwidth=options.getfieldvalue('headwidth',3)
+        headlength=options.getfieldvalue('headlength',5)
+        Q=ax.quiver(x,y,vx,vy,color,cmap=cmap,norm=norm,scale=scale,
+            width=width,headwidth=headwidth,headlength=headlength)
+        return
+
+    elif datatype==4:
+        #P1 patch plot
+        print('plot_unit message: P1 patch plot not implemented yet')
+        return
+
+    elif datatype==5:
+        #P0 patch plot
+        print('plot_unit message: P0 patch plot not implemented yet')
+        return
+
+    else:
+        raise ValueError('datatype=%d not supported' % datatype)
Index: ../trunk-jpl/src/py3/plot/plot_overlay.py
===================================================================
--- ../trunk-jpl/src/py3/plot/plot_overlay.py (revision 0)
+++ ../trunk-jpl/src/py3/plot/plot_overlay.py (revision 19895)
@@ -0,0 +1,132 @@
+import numpy as npy
+from processmesh import processmesh
+from processdata import processdata
+from xy2ll import xy2ll
+import matplotlib.pyplot as plt
+import matplotlib as mpl
+try:
+    from mpl_toolkits.basemap import Basemap
+except ImportError:
+    print('Basemap toolkit not installed')
+
+import os
+
+try:
+    from osgeo import gdal
+except ImportError:
+    print('osgeo/gdal for python not installed, plot_overlay is disabled')
+
+
+def plot_overlay(md,data,options,ax):
+    '''
+    Function for plotting a georeferenced image. This function is called
+    from within the plotmodel code.
+    '''
+
+    x,y,z,elements,is2d,isplanet=processmesh(md,[],options)
+
+    if data=='none' or data is None:
+        imageonly=1
+        data=npy.nan*npy.ones((md.mesh.numberofvertices,))
+        datatype=1
+    else:
+        imageonly=0
+        data,datatype=processdata(md,data,options)
+
+    if not is2d:
+        raise Exception('overlay plot not supported for 3D meshes, project on a 2D layer first')
+
+    if not options.exist('geotiff_name'):
+        raise Exception('overlay error: provide geotiff_name with path to geotiff file')
+    geotiff=options.getfieldvalue('geotiff_name')
+
+    xlim=options.getfieldvalue('xlim',[min(md.mesh.x),max(md.mesh.x)])
+    ylim=options.getfieldvalue('ylim',[min(md.mesh.y),max(md.mesh.y)])
+
+    gtif=gdal.Open(geotiff)
+    trans=gtif.GetGeoTransform()
+    xmin=trans[0]
+    xmax=trans[0]+gtif.RasterXSize*trans[1]
+    ymin=trans[3]+gtif.RasterYSize*trans[5]
+    ymax=trans[3]
+
+    # allow supplied geotiff to have limits smaller than basemap or model limits
+    x0=max(min(xlim),xmin)
+    x1=min(max(xlim),xmax)
+    y0=max(min(ylim),ymin)
+    y1=min(max(ylim),ymax)
+    inputname='temp.tif'
+    os.system('gdal_translate -quiet -projwin '+str(x0)+' '+str(y1)+' '+str(x1)+' '+str(y0)+' '+geotiff+' '+inputname)
+
+    gtif=gdal.Open(inputname)
+    arr=gtif.ReadAsArray()
+    #os.system('rm -rf ./temp.tif')
+
+    if gtif.RasterCount>=3: # RGB array: convert to luminance
+        r=gtif.GetRasterBand(1).ReadAsArray()
+        g=gtif.GetRasterBand(2).ReadAsArray()
+        b=gtif.GetRasterBand(3).ReadAsArray()
+        arr=0.299*r+0.587*g+0.114*b
+
+    # normalize array
+    arr=arr/float(npy.max(arr.ravel()))
+    arr=1.-arr # somehow the values got flipped
+
+    if options.getfieldvalue('overlayhist',0)==1:
+        ax=plt.gca()
+        num=2
+        while True:
+            if not plt.fignum_exists(num):
+                break
+            else:
+                num+=1
+        plt.figure(num)
+        plt.hist(arr.flatten(),bins=256,range=(0.,1.))
+        plt.title('histogram of overlay image, use for setting overlaylims')
+        plt.show()
+        plt.sca(ax) # return to original axes/figure
+
+    # get parameters from cropped geotiff
+    trans=gtif.GetGeoTransform()
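+    # GDAL's GetGeoTransform returns (originX, pixelWidth, rowRotation, originY,
+    # colRotation, pixelHeight); pixelHeight (trans[5]) is negative for north-up
+    # rasters, which is why ymin is derived from it below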
+    xmin=trans[0]
+    xmax=trans[0]+gtif.RasterXSize*trans[1]
+    ymin=trans[3]+gtif.RasterYSize*trans[5]
+    ymax=trans[3]
+    dx=trans[1]
+    dy=trans[5]
+
+    xarr=npy.arange(xmin,xmax,dx)
+    yarr=npy.arange(ymin,ymax,-dy) # -dy since origin='upper' (not sure how robust this is)
+    xg,yg=npy.meshgrid(xarr,yarr)
+    overlaylims=options.getfieldvalue('overlaylims',[min(arr.ravel()),max(arr.ravel())])
+    norm=mpl.colors.Normalize(vmin=overlaylims[0],vmax=overlaylims[1])
+
+    if options.exist('basemap'):
+        # create coordinate grid in map projection units (for plotting)
+        lat,lon=xy2ll(xlim,ylim,-1,0,71)
+        #plt.sca(ax)
+        width=xmax-xmin
+        height=ymax-ymin
+        lat_0,lon_0=xy2ll(xmin+width/2.,ymin+height/2.,-1,0,71)
+        m=Basemap(projection='spstere',
+            llcrnrlon=lon[0],llcrnrlat=lat[0],urcrnrlon=lon[1],urcrnrlat=lat[1],
+            epsg=3031,
+            resolution='c')
+            #width=width,height=height,lon_0=lon_0,lat_0=lat_0,
+            #lat_0=-90,lon_0=0,lat_ts=-71,
+            #llcrnrx=x0,llcrnry=y0,urcrnrx=x1,urcrnry=y1)
+        #test
+        #m.ax=ax
+        meridians=npy.arange(-180.,181.,1.)
+        parallels=npy.arange(-80.,80.,1.)
+        m.drawparallels(parallels,labels=[0,0,1,1]) # labels=[left,right,top,bottom]
+        m.drawmeridians(meridians,labels=[1,1,0,0])
+        m.drawcoastlines()
+        pc=m.pcolormesh(xg, yg, npy.flipud(arr), cmap=mpl.cm.Greys, norm=norm, ax=ax)
+
+    else:
+        pc=ax.pcolormesh(xg, yg, npy.flipud(arr), cmap=mpl.cm.Greys, norm=norm)
+
+    #rasterization?
+    if options.getfieldvalue('rasterized',0):
+        pc.set_rasterized(True)
Index: ../trunk-jpl/src/py3/plot/plotmodel.py
===================================================================
--- ../trunk-jpl/src/py3/plot/plotmodel.py (revision 0)
+++ ../trunk-jpl/src/py3/plot/plotmodel.py (revision 19895)
@@ -0,0 +1,99 @@
+import numpy as npy
+from plotoptions import plotoptions
+
+try:
+    import pylab as p
+    import matplotlib.pyplot as plt
+    from mpl_toolkits.axes_grid1 import ImageGrid, AxesGrid
+except ImportError:
+    print("could not import pylab, matplotlib has not been installed, no plotting capabilities enabled")
+
+from plot_manager import plot_manager
+from math import ceil, sqrt
+
+def plotmodel(md,*args):
+    '''
+    at command prompt, type 'plotdoc' for additional documentation
+    '''
+
+    #First process options
+    options=plotoptions(*args)
+
+    #get number of subplots
+    subplotwidth=ceil(sqrt(options.numberofplots))
+
+    #Get figure number and number of plots
+    figurenumber=options.figurenumber
+    numberofplots=options.numberofplots
+
+    #get hold
+    hold=options.list[0].getfieldvalue('hold',False)
+
+    #if nrows and ncols specified, then bypass
+    if options.list[0].exist('nrows'):
+        nrows=options.list[0].getfieldvalue('nrows')
+        nr=True
+    else:
+        nrows=npy.ceil(numberofplots/subplotwidth)
+        nr=False
+
+    if options.list[0].exist('ncols'):
+        ncols=options.list[0].getfieldvalue('ncols')
+        nc=True
+    else:
+        ncols=int(subplotwidth)
+        nc=False
+    ncols=int(ncols)
+    nrows=int(nrows)
+
+    #check that nrows and ncols were given at the same time!
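+    # (hypothetical example: plotmodel(md,'data',md.geometry.thickness,'data',md.initialization.vel,'nrows',1,'ncols',2))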
+    if nr!=nc:
+        raise Exception('error: nrows and ncols need to be specified together, or not at all')
+
+    #Go through plots
+    if numberofplots:
+
+        #if plt.fignum_exists(figurenumber):
+        #    plt.cla()
+
+        #if figsize specified
+        if options.list[0].exist('figsize'):
+            figsize=options.list[0].getfieldvalue('figsize')
+            fig=plt.figure(figurenumber,figsize=(figsize[0],figsize[1]),tight_layout=True)
+        else:
+            fig=plt.figure(figurenumber,tight_layout=True)
+        fig.clf()
+
+        # options needed to define plot grid
+        direction=options.list[0].getfieldvalue('direction','row') # row,column
+        axes_pad=options.list[0].getfieldvalue('axes_pad',0.25)
+        add_all=options.list[0].getfieldvalue('add_all',True) # True,False
+        share_all=options.list[0].getfieldvalue('share_all',True) # True,False
+        label_mode=options.list[0].getfieldvalue('label_mode','1') # 1,L,all
+        cbar_mode=options.list[0].getfieldvalue('cbar_mode','each') # none,single,each
+        cbar_location=options.list[0].getfieldvalue('cbar_location','right') # right,top
+        cbar_size=options.list[0].getfieldvalue('cbar_size','5%')
+        cbar_pad=options.list[0].getfieldvalue('cbar_pad','2.5%') # None or %
+
+        axgrid=ImageGrid(fig, 111,
+            nrows_ncols=(nrows,ncols),
+            direction=direction,
+            axes_pad=axes_pad,
+            add_all=add_all,
+            share_all=share_all,
+            label_mode=label_mode,
+            cbar_mode=cbar_mode,
+            cbar_location=cbar_location,
+            cbar_size=cbar_size,
+            cbar_pad=cbar_pad
+        )
+
+        if cbar_mode=='none':
+            for ax in axgrid.cbar_axes: fig._axstack.remove(ax)
+
+        for i in range(numberofplots):
+            plot_manager(options.list[i].getfieldvalue('model',md),options.list[i],fig,axgrid[i])
+
+        fig.show()
+    else:
+        raise Exception('plotmodel error message: no output data found.')
Index: ../trunk-jpl/src/py3/plot/applyoptions.py
===================================================================
--- ../trunk-jpl/src/py3/plot/applyoptions.py (revision 0)
+++ ../trunk-jpl/src/py3/plot/applyoptions.py (revision 19895)
@@ -0,0 +1,315 @@
+import numpy as npy
+from cmaptools import truncate_colormap
+from plot_contour import plot_contour
+from plot_streamlines import plot_streamlines
+from expdisp import expdisp
+
+try:
+    from matplotlib.ticker import MaxNLocator
+    from mpl_toolkits.axes_grid1 import make_axes_locatable
+    from mpl_toolkits.mplot3d import Axes3D
+    import matplotlib as mpl
+    import pylab as p
+    import matplotlib.pyplot as plt
+except ImportError:
+    print("could not import pylab, matplotlib has not been installed, no plotting capabilities enabled")
+
+def applyoptions(md,data,options,fig,ax):
+    '''
+    APPLYOPTIONS - apply options to current plot
+
+    'plotobj' is the object returned by the specific plot call used to
+    render the data. This object is used for adding a colorbar.
+
+    Usage:
+        applyoptions(md,data,options,fig,ax)
+
+    See also: PLOTMODEL, PARSE_OPTIONS
+    '''
+
+    # get handle to current figure and axes instance
+    #fig = p.gcf()
+    #ax=p.gca()
+
+    #font {{{
+    fontsize=options.getfieldvalue('fontsize',8)
+    fontweight=options.getfieldvalue('fontweight','normal')
+    fontfamily=options.getfieldvalue('fontfamily','sans-serif')
+    font={'fontsize'   :fontsize,
+          'fontweight' :fontweight,
+          'family'     :fontfamily}
+    #}}}
+
+    #title {{{
+    if options.exist('title'):
+        title=options.getfieldvalue('title')
+        if options.exist('titlefontsize'):
+            titlefontsize=options.getfieldvalue('titlefontsize')
+        else:
+            titlefontsize=fontsize
+        if options.exist('titlefontweight'):
+            titlefontweight=options.getfieldvalue('titlefontweight')
+        else:
+            titlefontweight=fontweight
+        #title font
+        titlefont=font.copy()
+        titlefont['size']=titlefontsize
+        titlefont['weight']=titlefontweight
+        ax.set_title(title,**titlefont)
+    #}}}
+
+    #xlabel, ylabel, zlabel {{{
+    if options.exist('labelfontsize'):
+        labelfontsize=options.getfieldvalue('labelfontsize')
+    else:
+        labelfontsize=fontsize
+    if options.exist('labelfontweight'):
+        labelfontweight=options.getfieldvalue('labelfontweight')
+    else:
+        labelfontweight=fontweight
+
+    #font dict for labels
+    labelfont=font.copy()
+    labelfont['fontsize']=labelfontsize
+    labelfont['fontweight']=labelfontweight
+
+    if options.exist('xlabel'):
+        ax.set_xlabel(options.getfieldvalue('xlabel'),**labelfont)
+    if options.exist('ylabel'):
+        ax.set_ylabel(options.getfieldvalue('ylabel'),**labelfont)
+    if options.exist('zlabel'):
+        ax.set_zlabel(options.getfieldvalue('zlabel'),**labelfont)
+    #}}}
+
+    #xticks, yticks, zticks (tick locations) {{{
+    # set tick locations first, then labels (set_*ticks did not accept a labels argument in older matplotlib)
+    if options.exist('xticks'):
+        ax.set_xticks(options.getfieldvalue('xticks'))
+        if options.exist('xticklabels'):
+            ax.set_xticklabels(options.getfieldvalue('xticklabels'))
+    if options.exist('yticks'):
+        ax.set_yticks(options.getfieldvalue('yticks'))
+        if options.exist('yticklabels'):
+            ax.set_yticklabels(options.getfieldvalue('yticklabels'))
+    if options.exist('zticks'):
+        ax.set_zticks(options.getfieldvalue('zticks'))
+        if options.exist('zticklabels'):
+            ax.set_zticklabels(options.getfieldvalue('zticklabels'))
+    #}}}
+
+    #xticklabels,yticklabels,zticklabels {{{
+    if options.getfieldvalue('ticklabels','off')=='off' or options.getfieldvalue('ticklabels',0)==0:
+        options.addfielddefault('xticklabels',[])
+        options.addfielddefault('yticklabels',[])
+        # TODO check if ax has a z-axis (e.g. is 3D)
+    if options.exist('xticklabels'):
+        xticklabels=options.getfieldvalue('xticklabels')
+        ax.set_xticklabels(xticklabels)
+    if options.exist('yticklabels'):
+        yticklabels=options.getfieldvalue('yticklabels')
+        ax.set_yticklabels(yticklabels)
+    if options.exist('zticklabels'):
+        zticklabels=options.getfieldvalue('zticklabels')
+        ax.set_zticklabels(zticklabels)
+    #}}}
+
+    #ticklabel notation {{{
+    #ax.ticklabel_format(style='sci',scilimits=(0,0))
+    #}}}
+
+    #ticklabelfontsize {{{
+    if options.exist('ticklabelfontsize'):
+        for label in ax.get_xticklabels() + ax.get_yticklabels():
+            label.set_fontsize(options.getfieldvalue('ticklabelfontsize'))
+        if int(md.mesh.dimension)==3:
+            for label in ax.get_zticklabels():
+                label.set_fontsize(options.getfieldvalue('ticklabelfontsize'))
+    #}}}
+
+    #view
+    #if int(md.mesh.dimension) == 3 and options.exist('layer'):
+    #    #options.getfieldvalue('view') ?
+    #    ax=fig.gca(projection='3d')
+    #plt.show()
+
+    #axis {{{
+    if options.exist('axis'):
+        if options.getfieldvalue('axis',True)=='off':
+            ax.ticklabel_format(style='plain')
+            p.setp(ax.get_xticklabels(), visible=False)
+            p.setp(ax.get_yticklabels(), visible=False)
+    # }}}
+
+    #box
+    if options.exist('box'):
+        eval(options.getfieldvalue('box'))
+
+    #xlim, ylim, zlim {{{
+    if options.exist('xlim'):
+        ax.set_xlim(options.getfieldvalue('xlim'))
+    if options.exist('ylim'):
+        ax.set_ylim(options.getfieldvalue('ylim'))
+    if options.exist('zlim'):
+        ax.set_zlim(options.getfieldvalue('zlim'))
+    #}}}
+
+    #latlon
+
+    #Basinzoom
+
+    #ShowBasins
+
+    #clim {{{
+    if options.exist('clim'):
+        lims=options.getfieldvalue('clim')
+        assert len(lims)==2, 'error, clim should be passed as a list of length 2'
+    elif options.exist('caxis'):
+        lims=options.getfieldvalue('caxis')
+        assert len(lims)==2, 'error, caxis should be passed as a list of length 2'
+        options.addfielddefault('clim',lims)
+    else:
+        if len(data)>0:
+            lims=[data.min(),data.max()]
+        else:
+            lims=[0,1]
+    #}}}
+
+    #shading
+    #if options.exist('shading'):
+
+    #grid {{{
+    if options.exist('grid'):
+        if 'on' in options.getfieldvalue('grid','on'):
+            ax.grid()
+    #}}}
+
+    #colormap {{{
+    # default sequential colormap
+    defaultmap=truncate_colormap(mpl.cm.gnuplot2,0.1,0.9,128)
+    cmap=options.getfieldvalue('colormap',defaultmap)
+    norm = mpl.colors.Normalize(vmin=lims[0], vmax=lims[1])
+    options.addfield('colornorm',norm)
+    cbar_extend=0
+    if options.exist('cmap_set_over'):
+        over=options.getfieldvalue('cmap_set_over','0.5')
+        cmap.set_over(over)
+        cbar_extend+=1
+    if options.exist('cmap_set_under'):
+        under=options.getfieldvalue('cmap_set_under','0.5')
+        cmap.set_under(under)
+        cbar_extend+=2
+    options.addfield('colormap',cmap)
+    #}}}
+
+    #contours {{{
+    if options.exist('contourlevels'):
+        plot_contour(md,data,options,ax)
+    #}}}
+
+    #wrapping
+
+    #colorbar {{{
+    if options.getfieldvalue('colorbar',1)==1:
+        if cbar_extend==0:
+            extend='neither'
+        elif cbar_extend==1:
+            extend='max'
+        elif cbar_extend==2:
+            extend='min'
+        elif cbar_extend==3:
+            extend='both'
+        cb = mpl.colorbar.ColorbarBase(ax.cax, cmap=cmap, norm=norm, extend=extend)
+        if options.exist('alpha'):
+            cb.set_alpha(options.getfieldvalue('alpha'))
+        if options.exist('colorbarnumticks'):
+            cb.locator=MaxNLocator(nbins=options.getfieldvalue('colorbarnumticks',5))
+        else:
+            cb.locator=MaxNLocator(nbins=5) # default 5 ticks
+        if options.exist('colorbartickspacing'):
+            locs=npy.arange(lims[0],lims[1]+1,options.getfieldvalue('colorbartickspacing'))
+            cb.set_ticks(locs)
+        if options.exist('colorbarlines'):
+            locs=npy.arange(lims[0],lims[1]+1,options.getfieldvalue('colorbarlines'))
+            cb.add_lines(locs,['k' for i in range(len(locs))],npy.ones_like(locs))
+        if options.exist('colorbarlineatvalue'):
+            locs=options.getfieldvalue('colorbarlineatvalue')
+            colors=options.getfieldvalue('colorbarlineatvaluecolor',['k' for i in range(len(locs))])
+            widths=options.getfieldvalue('colorbarlineatvaluewidth',npy.ones_like(locs))
+            cb.add_lines(locs,colors,widths)
+        if options.exist('colorbartitle'):
+            if options.exist('colorbartitlepad'):
+                cb.set_label(options.getfieldvalue('colorbartitle'),labelpad=options.getfieldvalue('colorbartitlepad'),fontsize=fontsize)
+            else:
+                cb.set_label(options.getfieldvalue('colorbartitle'),fontsize=fontsize)
+        cb.ax.tick_params(labelsize=fontsize)
+        cb.solids.set_rasterized(True)
+        cb.update_ticks()
+        cb.set_alpha(1)
+        cb.draw_all()
+        plt.sca(ax) # return to original axes control
+    #}}}
+
+    #expdisp {{{
+    if options.exist('expdisp'):
+        filename=options.getfieldvalue('expdisp')
+        style=options.getfieldvalue('expstyle','k')
+        linewidth=options.getfieldvalue('explinewidth',1)
+        for i in range(len(filename)):
+            filenamei=filename[i]
+            stylei=style[i]
+            if type(linewidth)==list:
+                linewidthi=linewidth[i]
+            else:
+                linewidthi=linewidth
+            expdisp(filenamei,ax,linestyle=stylei,linewidth=linewidthi,unitmultiplier=options.getfieldvalue('unit',1))
+    #}}}
+
+    #area
+
+    #text {{{
+    if options.exist('text'):
+        text=options.getfieldvalue('text')
+        textx=options.getfieldvalue('textx')
+        texty=options.getfieldvalue('texty')
+        textcolor=options.getfieldvalue('textcolor')
+        textweight=options.getfieldvalue('textweight')
+        textrotation=options.getfieldvalue('textrotation')
+        textfontsize=options.getfieldvalue('textfontsize')
+        for label,x,y,size,color,weight,rotation in zip(text,textx,texty,textfontsize,textcolor,textweight,textrotation):
+            ax.text(x,y,label,transform=ax.transAxes,fontsize=size,color=color,weight=weight,rotation=rotation)
+    #}}}
+
+    #north arrow
+
+    #scale ruler
+
+    #streamlines
+    if options.exist('streamlines'):
+        plot_streamlines(md,options,ax)
+
+    #axis positions
+
+    #figure position
+
+    #axes position
+
+    #showregion
+
+    #flat edges of a partition
+
+    #scatter
+
+    #backgroundcolor
+
+    #figurebackgroundcolor
+
+    #lighting
+
+    #point cloud
+
+    #inset
Index: ../trunk-jpl/src/py3/plot/export_gl.py
===================================================================
--- ../trunk-jpl/src/py3/plot/export_gl.py (revision 0)
+++ ../trunk-jpl/src/py3/plot/export_gl.py (revision 19895)
@@ -0,0 +1,124 @@
+from plotoptions import plotoptions
+from checkplotoptions import checkplotoptions
+from model import model
+import numpy as np
+import math
+from writejsfile import writejsfile
+
+def export_gl(md,*varargin):
+    class ResultObj(object):
+        def __getattr__(self, attr):
+            return self.__dict__.get(attr)
+
+    print('getting options')
+    templist=plotoptions(*varargin) # unpack the varargin tuple, as plotmodel does
+    optionslist=templist.list
+    options=optionslist[1]
+    options=checkplotoptions(md,options)
+    #print(templist,options)
+    #templist contains options 0-3. Use in the future to rework.
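+    # (hypothetical call with placeholder option values:
+    #  export_gl(md,'directory','./','database','mymodel','scaling_factor',100,'data',md.geometry.surface))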
+
+    #Setup unique directory in present dir:
+    print('setting directory')
+    directory=optionslist[0].getfieldvalue('directory')
+    databasename=optionslist[0].getfieldvalue('database')
+
+    #scaling factor:
+    print('setting scaling factor')
+    scaling_factor=optionslist[0].getfieldvalue('scaling_factor')
+
+    #Deal with title:
+    print('setting title')
+    if optionslist[0].exist('title'):
+        title=optionslist[0].getfieldvalue('title')
+    else:
+        title=''
+
+    #initialize model (note: attributes are set on the imported model class itself):
+    print('initializing model')
+    model.title=title
+    model.initialZoomFactor=options.getfieldvalue('zoom',-.25)
+
+    #Deal with contour {{{
+    print('getting contour')
+    print(md.mesh.segments)
+    segments0=[s-1 for s in md.mesh.segments[:,0]]
+    segments1=[s-1 for s in md.mesh.segments[:,1]]
+
+    contour_lat1=md.mesh.lat.take(segments0)
+    contour_lat2=md.mesh.lat.take(segments1)
+    contour_long1=md.mesh.long.take(segments0)
+    contour_long2=md.mesh.long.take(segments1)
+    contour_surface1=md.geometry.surface.take(segments0)
+    contour_surface2=md.geometry.surface.take(segments1)
+
+    R1=6371000*np.ones(len(contour_surface1))+scaling_factor*contour_surface1
+    R2=6371000*np.ones(len(contour_surface2))+scaling_factor*contour_surface2
+
+    #convert spherical (R,lat,long) to cartesian x,y,z ('lng' avoids shadowing the builtin 'int')
+    model.contourx1=list(map(lambda r,lat,lng: r*math.cos(math.radians(lat))*math.cos(math.radians(lng)), R1,contour_lat1,contour_long1))
+    model.contoury1=list(map(lambda r,lat,lng: r*math.cos(math.radians(lat))*math.sin(math.radians(lng)), R1,contour_lat1,contour_long1))
+    model.contourz1=list(map(lambda r,lat: r*math.sin(math.radians(lat)), R1,contour_lat1))
+
+    model.contourx2=list(map(lambda r,lat,lng: r*math.cos(math.radians(lat))*math.cos(math.radians(lng)), R2,contour_lat2,contour_long2))
+    model.contoury2=list(map(lambda r,lat,lng: r*math.cos(math.radians(lat))*math.sin(math.radians(lng)), R2,contour_lat2,contour_long2))
+    model.contourz2=list(map(lambda r,lat: r*math.sin(math.radians(lat)), R2,contour_lat2))
+
+    #}}}
+    #Deal with mesh and results {{{
+    print('getting mesh')
+    surface=md.geometry.surface.flatten()
+    numberofelements=md.mesh.numberofelements
+    numberofvertices=md.mesh.numberofvertices
+    R=6371000*np.ones(len(md.mesh.lat))+scaling_factor*surface
+
+    x=list(map(lambda r,lat,lng: r*math.cos(math.radians(lat))*math.cos(math.radians(lng)), R,md.mesh.lat,md.mesh.long))
+    y=list(map(lambda r,lat,lng: r*math.cos(math.radians(lat))*math.sin(math.radians(lng)), R,md.mesh.lat,md.mesh.long))
+    z=list(map(lambda r,lat: r*math.sin(math.radians(lat)), R,md.mesh.lat))
+
+    #Deal with triangulation:
+    print('getting triangulation')
+    model.index=md.mesh.elements
+    model.x=x
+    model.y=y
+    model.z=z
+    model.surface=surface
+
+    results=[]
+    print(optionslist)
+    #Deal with data:
+    print('getting data')
+    for i in range(0,len(optionslist)):
+        options=optionslist[i]
+        options=checkplotoptions(md,options)
+        data=options.getfieldvalue('data').flatten()
+        results.append(ResultObj())
+        results[i].data=data
+        results[i].caxis=options.getfieldvalue('caxis',[min(data), max(data)])
+
+        label=options.getfieldvalue('label','')
+        if label=='':
+            #create generic label:
+            label=['data',str(i)]
+        results[i].label=label
+
+        shortlabel=options.getfieldvalue('shortlabel','')
+        if shortlabel=='':
+            #create generic short label:
+            shortlabel=['data',str(i)]
+        results[i].shortlabel=shortlabel
+
+        if type(data[2])!=np.float64:
+            time_range=options.getfieldvalue('time_range',[0,100])
+            results[i].time_range=time_range
+
+        unit=options.getfieldvalue('unit','')
+        if unit=='':
+            #create generic unit:
+            unit='SI'
+        results[i].unit=unit
+    model.results=results
+
+    #Write model to javascript database file:
+    print('writing to file')
+    writejsfile(directory+databasename+'.js',model,databasename)
+#}}}
Index: ../trunk-jpl/src/py3/plot/plot_streamlines.py
===================================================================
--- ../trunk-jpl/src/py3/plot/plot_streamlines.py (revision 0)
+++ ../trunk-jpl/src/py3/plot/plot_streamlines.py (revision 19895)
@@ -0,0 +1,64 @@
+import numpy as npy
+import matplotlib.pyplot as plt
+import matplotlib.tri as tri
+from processmesh import processmesh
+from processdata import processdata
+
+def plot_streamlines(md,options,ax):
+    '''
+    plot streamlines on a figure, using by default vx and vy components in md.initialization.
+
+    Usage:
+        plot_streamlines(md,options,ax)
+
+    available options, to be passed to plotmodel as a string-value pair:
+        streamlinesvx:         vx component (default md.initialization.vx)
+        streamlinesvy:         vy component (default md.initialization.vy)
+        streamlinescolor:      color string
+        streamlinesdensity:    density of plotted streamlines (default 1)
+        streamlineswidth:      linewidth value or 'vel' to scale by velocity
+        streamlineswidthscale: scaling multiplier for linewidth scaled by velocity
+        streamlinesarrowsize:  size of arrows on lines (default 1)
+    '''
+
+    # retrieve options
+    vx=options.getfieldvalue('streamlinesvx',md.initialization.vx)
+    vy=options.getfieldvalue('streamlinesvy',md.initialization.vy)
+    color=options.getfieldvalue('streamlinescolor','k')
+    linewidth=options.getfieldvalue('streamlineswidth',1)
+    density=options.getfieldvalue('streamlinesdensity',1)
+    arrowsize=options.getfieldvalue('streamlinesarrowsize',1)
+
+    #process mesh and data
+    x,y,z,elements,is2d,isplanet=processmesh(md,vx,options)
+    u,datatype=processdata(md,vx,options)
+    v,datatype=processdata(md,vy,options)
+
+    if not is2d:
+        raise Exception('plot_streamlines error: streamlines option not supported for 3D plots')
+
+    # lay out a regular 100x100 grid spanning the mesh (streamplot requires a regular grid)
+    yg,xg=npy.mgrid[min(md.mesh.y):max(md.mesh.y):100j,min(md.mesh.x):max(md.mesh.x):100j]
+
+    # create triangulation instance and interpolate to the regularly spaced quad grid
+    triang=tri.Triangulation(md.mesh.x,md.mesh.y,md.mesh.elements-1)
+    interp_lin_u=tri.LinearTriInterpolator(triang,u)
+    interp_lin_v=tri.LinearTriInterpolator(triang,v)
+    ug=interp_lin_u(xg,yg)
+    vg=interp_lin_v(xg,yg)
+
+    if linewidth=='vel':
+        scale=options.getfieldvalue('streamlineswidthscale',3)
+        vel=npy.sqrt(ug**2+vg**2)
+        linewidth=scale*vel/npy.amax(vel)
+        linewidth[linewidth<0.5]=0.5
+
+    # plot streamlines
+    ax.streamplot(xg,yg,ug,vg,color=color,linewidth=linewidth,density=density,arrowsize=arrowsize)
Index: ../trunk-jpl/src/py3/plot/plot_contour.py
===================================================================
--- ../trunk-jpl/src/py3/plot/plot_contour.py (revision 0)
+++ ../trunk-jpl/src/py3/plot/plot_contour.py (revision 19895)
@@ -0,0 +1,38 @@
+import numpy as npy
+from averaging import averaging
+import matplotlib.pyplot as plt
+from processmesh import processmesh
+from processdata import processdata
+
+def plot_contour(md,datain,options,ax):
+    '''
+    plot contours of a given field (called within plotmodel)
+
+    Usage:
+        plot_contour(md,data,options,ax)
+
+    See also: plotmodel
+    '''
+
+    x,y,z,elements,is2d,isplanet=processmesh(md,datain,options)
+    data,datatype=processdata(md,datain,options)
+
+    # process data: must be on nodes
+    if datatype==1: # element data: average onto nodes
+        data=averaging(md,data,0)
+    elif datatype==2:
+        pass
+    elif datatype==3: # quiver (vector) data: contour the magnitude
+        data=npy.sqrt(data[:,0]**2+data[:,1]**2)
+    else:
+        raise ValueError('datatype not supported in call to plot_contour')
+
+    # contouronly TODO (cla will also clear an overlay image)
+
+    # retrieve necessary options
+    levels=options.getfieldvalue('contourlevels')
+    colors=options.getfieldvalue('contourcolors')
+    norm=options.getfieldvalue('colornorm')
+    linestyles=options.getfieldvalue('contourlinestyles')
+    linewidths=options.getfieldvalue('contourlinewidths')
+
+    ax.tricontour(x,y,elements,data,levels,colors=colors,norm=norm,linestyles=linestyles,linewidths=linewidths)
Index: ../trunk-jpl/src/py3/plot/writejsfile.py
===================================================================
--- ../trunk-jpl/src/py3/plot/writejsfile.py (revision 0)
+++ ../trunk-jpl/src/py3/plot/writejsfile.py (revision 19895)
@@ -0,0 +1,57 @@
+import numpy
+from writejsfield import writejsfield
+
+def writejsfile(filename,model,keyname):
+#WRITEJSFILE - write model file to javascript database
+#
+# Usage:
+#    writejsfile(filename,model,keyname)
+#
+    nods=len(model.x)
+    nel=len(model.index)
+    nx=len(model.contourx1)
+    print(filename)
+    fid=open(filename,'w') # unbuffered mode (buffering=0) is not allowed for text files in python 3
+
+    fid.write('model = {};\n')
+    fid.write('model["title"]="{0}";\n'.format(model.title))
+    fid.write('model["initialZoomFactor"]={0};\n'.format(model.initialZoomFactor))
+    #write index:
+    fid.write('\n')
+    fid.write('model["index"]=[')
+    for i in range(0, nel-1):
+        fid.write('[{0}, {1}, {2}],'.format(model.index[i][0],model.index[i][1],model.index[i][2]))
+    fid.write('[{0}, {1}, {2}]];\n'.format(model.index[-1][0],model.index[-1][1],model.index[-1][2]))
+    fid.write('\n')
+    print('writing model coordinates')
+    writejsfield(fid,'model["x"]',model.x,nods)
+    writejsfield(fid,'model["y"]',model.y,nods)
+    writejsfield(fid,'model["z"]',model.z,nods)
+    writejsfield(fid,'model["surface"]',model.surface,nods)
+    writejsfield(fid,'model["contourx1"]',model.contourx1,nx)
+    writejsfield(fid,'model["contoury1"]',model.contoury1,nx)
+    writejsfield(fid,'model["contourz1"]',model.contourz1,nx)
+    writejsfield(fid,'model["contourx2"]',model.contourx2,nx)
+    writejsfield(fid,'model["contoury2"]',model.contoury2,nx)
+    writejsfield(fid,'model["contourz2"]',model.contourz2,nx)
+
+    print('writing results')
+    results=model.results
+    fid.write('results={};\n')
+
+    for i in range(0,len(results)):
+        fid.write('result={};\n')
+        writejsfield(fid,'result["data"]',results[i].data,nods)
+        fid.write('\n')
+        fid.write('result["caxis"]=[{0},{1}];\n'.format(results[i].caxis[0],results[i].caxis[1]))
+        fid.write('result["label"]="{0}";\n'.format(results[i].label))
+        fid.write('result["shortlabel"]="{0}";\n'.format(results[i].shortlabel))
+        fid.write('result["unit"]="{0}";\n'.format(results[i].unit))
+        if type(results[i].data[0])!=numpy.float64: # transient (multi-column) data carries a time range, matching export_gl
+            fid.write('result["time_range"]=[{0},{1}];\n'.format(results[i].time_range[0],results[i].time_range[1]))
+        fid.write('results["{0}"]=result;\n'.format(i))
+        fid.write('\n')
+    fid.write('model.results=results;\n')
+    fid.write('models["{0}"]=model;\n'.format(keyname))
+
+    fid.close()
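
Usage sketch for the py3 plotting entry point added above (illustrative only; it assumes a populated ISSM model 'md' and that the src/py3/plot modules are on the Python path):

    from plotmodel import plotmodel

    # one panel: a nodal field with a title and a colorbar label
    plotmodel(md,'data',md.geometry.thickness,'title','Thickness','colorbartitle','m')

    # two panels on one row: the mesh next to the velocity magnitude
    plotmodel(md,'data','mesh','data',md.initialization.vel,'nrows',1,'ncols',2)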