Index: /issm/trunk-jpl/src/py3/boundaryconditions/PattynSMB.py
===================================================================
--- /issm/trunk-jpl/src/py3/boundaryconditions/PattynSMB.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/boundaryconditions/PattynSMB.py	(revision 21255)
@@ -1,4 +1,4 @@
 import os
-import numpy as npy
+import numpy as np
 def PattynSMB(md,Tf):
 	"""
@@ -25,5 +25,5 @@
 
 	#Double check lat and long exist:
-	if npy.any(npy.isnan(md.mesh.lat)): 
+	if np.any(np.isnan(md.mesh.lat)): 
 		raise IOError('PattynSMB error message: md.mesh.lat field required')
 
@@ -36,5 +36,5 @@
 	# Calculate summer temperature, Eqn (12)
 	# No melting at PIG in mean conditions - need about 6 degress Tf to start having a negative yearly SMB
-	Tms = 16.81 - 0.00692*md.geometry.surface - 0.27937*npy.abs(md.mesh.lat) + Tf
+	Tms = 16.81 - 0.00692*md.geometry.surface - 0.27937*np.abs(md.mesh.lat) + Tf
 	Tms= Tms[0]
 
@@ -43,7 +43,7 @@
 
 	# Calculate Ablation, Eqn (10) (use for both Antarctica & Greenland), max melt is 10m/a
-	ABLdot=0.*npy.ones(md.mesh.numberofvertices)
-	pos=npy.nonzero(Tms>=0)
-	ABLdot[pos]=npy.minimum(1.4*Tms[pos],10)
+	ABLdot=0.*np.ones(md.mesh.numberofvertices)
+	pos=np.nonzero(Tms>=0)
+	ABLdot[pos]=np.minimum(1.4*Tms[pos],10)
 
 	smb=ACCdot-ABLdot
Index: /issm/trunk-jpl/src/py3/classes/outputdefinition.py
===================================================================
--- /issm/trunk-jpl/src/py3/classes/outputdefinition.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/classes/outputdefinition.py	(revision 21255)
@@ -4,5 +4,5 @@
 from checkfield import checkfield
 from WriteData import WriteData
-import numpy as npy
+import numpy as np
 
 class outputdefinition(object):
@@ -36,5 +36,5 @@
 	def marshall(self,md,fid):    # {{{
 		
-		enums=npy.zeros(len(self.definitions),)
+		enums=np.zeros(len(self.definitions),)
 		
 		for i in range(len(self.definitions)):
@@ -44,5 +44,5 @@
 			enums[i]=StringToEnum(classdefinition)[0]
 		
-		enums=npy.unique(enums);
+		enums=np.unique(enums);
 		
 		WriteData(fid,'data',enums,'enum',OutputdefinitionListEnum(),'format','DoubleMat','mattype',1);
Index: /issm/trunk-jpl/src/py3/coordsystems/ll2xy.py
===================================================================
--- /issm/trunk-jpl/src/py3/coordsystems/ll2xy.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/coordsystems/ll2xy.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy 
+import numpy as np 
 
 def ll2xy(lat,lon,sgn=-1,central_meridian=0,standard_parallel=71):
@@ -36,24 +36,24 @@
 	ex2 = .006693883
 	# Eccentricity of the Hughes ellipsoid
-	ex = npy.sqrt(ex2)
+	ex = np.sqrt(ex2)
 	
-	latitude = npy.abs(lat) * npy.pi/180.
-	longitude = (lon + delta) * npy.pi/180.
+	latitude = np.abs(lat) * np.pi/180.
+	longitude = (lon + delta) * np.pi/180.
 	
 	# compute X and Y in grid coordinates.
-	T = npy.tan(npy.pi/4-latitude/2) / ((1-ex*npy.sin(latitude))/(1+ex*npy.sin(latitude)))**(ex/2)
+	T = np.tan(np.pi/4-latitude/2) / ((1-ex*np.sin(latitude))/(1+ex*np.sin(latitude)))**(ex/2)
 	
 	if (90 - slat) <  1.e-5:
-		rho = 2.*re*T/npy.sqrt((1.+ex)**(1.+ex)*(1.-ex)**(1.-ex))
+		rho = 2.*re*T/np.sqrt((1.+ex)**(1.+ex)*(1.-ex)**(1.-ex))
 	else:
-		sl  = slat*npy.pi/180.
-		tc  = npy.tan(npy.pi/4.-sl/2.)/((1.-ex*npy.sin(sl))/(1.+ex*npy.sin(sl)))**(ex/2.)
-		mc  = npy.cos(sl)/npy.sqrt(1.0-ex2*(npy.sin(sl)**2))
+		sl  = slat*np.pi/180.
+		tc  = np.tan(np.pi/4.-sl/2.)/((1.-ex*np.sin(sl))/(1.+ex*np.sin(sl)))**(ex/2.)
+		mc  = np.cos(sl)/np.sqrt(1.0-ex2*(np.sin(sl)**2))
 		rho = re*mc*T/tc
 	
-	y = -rho * sgn * npy.cos(sgn*longitude)
-	x =  rho * sgn * npy.sin(sgn*longitude)
+	y = -rho * sgn * np.cos(sgn*longitude)
+	x =  rho * sgn * np.sin(sgn*longitude)
 
-	cnt1=npy.nonzero(latitude>= npy.pi/2.)[0]
+	cnt1=np.nonzero(latitude>= np.pi/2.)[0]
 	
 	if cnt1:
Index: /issm/trunk-jpl/src/py3/coordsystems/xy2ll.py
===================================================================
--- /issm/trunk-jpl/src/py3/coordsystems/xy2ll.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/coordsystems/xy2ll.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from math import pi
 
@@ -39,7 +39,7 @@
 	# if x,y passed as lists, convert to numpy arrays
 	if type(x) != "numpy.ndarray":
-		x=npy.array(x)
+		x=np.array(x)
 	if type(y) != "numpy.ndarray":
-		y=npy.array(y)
+		y=np.array(y)
 
 	## Conversion constant from degrees to radians
@@ -50,26 +50,26 @@
 	ex2 = .006693883
 	## Eccentricity of the Hughes ellipsoid
-	ex = npy.sqrt(ex2)
+	ex = np.sqrt(ex2)
 	
 	sl = slat*pi/180.
-	rho = npy.sqrt(x**2 + y**2)
-	cm = npy.cos(sl) / npy.sqrt(1.0 - ex2 * (npy.sin(sl)**2))
-	T = npy.tan((pi/4.0) - (sl/2.0)) / ((1.0 - ex*npy.sin(sl)) / (1.0 + ex*npy.sin(sl)))**(ex / 2.0)
+	rho = np.sqrt(x**2 + y**2)
+	cm = np.cos(sl) / np.sqrt(1.0 - ex2 * (np.sin(sl)**2))
+	T = np.tan((pi/4.0) - (sl/2.0)) / ((1.0 - ex*np.sin(sl)) / (1.0 + ex*np.sin(sl)))**(ex / 2.0)
 	
 	if abs(slat-90.) < 1.e-5:
-		T = rho*npy.sqrt((1. + ex)**(1. + ex) * (1. - ex)**(1. - ex)) / 2. / re
+		T = rho*np.sqrt((1. + ex)**(1. + ex) * (1. - ex)**(1. - ex)) / 2. / re
 	else:
 		T = rho * T / (re * cm)
 	
-	chi = (pi / 2.0) - 2.0 * npy.arctan(T)
+	chi = (pi / 2.0) - 2.0 * np.arctan(T)
 	lat = chi + ((ex2 / 2.0) + (5.0 * ex2**2.0 / 24.0) + (ex2**3.0 / 12.0)) * \
-		npy.sin(2 * chi) + ((7.0 * ex2**2.0 / 48.0) + (29.0 * ex2**3 / 240.0)) * \
-		npy.sin(4.0 * chi) + (7.0 * ex2**3.0 / 120.0) * npy.sin(6.0 * chi) 
+		np.sin(2 * chi) + ((7.0 * ex2**2.0 / 48.0) + (29.0 * ex2**3 / 240.0)) * \
+		np.sin(4.0 * chi) + (7.0 * ex2**3.0 / 120.0) * np.sin(6.0 * chi) 
 	
 	lat = sgn * lat
-	lon = npy.arctan2(sgn * x,-sgn * y)
+	lon = np.arctan2(sgn * x,-sgn * y)
 	lon = sgn * lon
 	
-	res1 = npy.nonzero(rho <= 0.1)[0]
+	res1 = np.nonzero(rho <= 0.1)[0]
 	if len(res1) > 0:
 		lat[res1] = 90. * sgn
Index: /issm/trunk-jpl/src/py3/exp/expcoarsen.py
===================================================================
--- /issm/trunk-jpl/src/py3/exp/expcoarsen.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/exp/expcoarsen.py	(revision 21255)
@@ -1,4 +1,4 @@
 import os.path
-import numpy as npy
+import numpy as np
 from collections import OrderedDict
 from expread import expread
@@ -34,5 +34,5 @@
 	for contour in  contours:
 		
-		numpoints=npy.size(contour['x'])
+		numpoints=np.size(contour['x'])
 
 		j=0
@@ -44,17 +44,17 @@
 
 			#see whether we keep this point or not
-			distance=npy.sqrt((x[j]-x[j+1])**2+(y[j]-y[j+1])**2)
+			distance=np.sqrt((x[j]-x[j+1])**2+(y[j]-y[j+1])**2)
 			if distance<resolution and j<numpoints-2:   #do not remove last point
-				x=npy.delete(x,j+1,0)
-				y=npy.delete(y,j+1,0)
+				x=np.delete(x,j+1,0)
+				y=np.delete(y,j+1,0)
 				numpoints=numpoints-1
 			else:
-				division=int(npy.floor(distance/resolution)+1)
+				division=int(np.floor(distance/resolution)+1)
 				if division>=2:
-					xi=npy.linspace(x[j],x[j+1],division)
-					yi=npy.linspace(y[j],y[j+1],division)
+					xi=np.linspace(x[j],x[j+1],division)
+					yi=np.linspace(y[j],y[j+1],division)
 					
-					x=npy.hstack((x[0:j+1],xi[1:-1],x[j+1:]))
-					y=npy.hstack((y[0:j+1],yi[1:-1],y[j+1:]))
+					x=np.hstack((x[0:j+1],xi[1:-1],x[j+1:]))
+					y=np.hstack((y[0:j+1],yi[1:-1],y[j+1:]))
 
 					#update current point
@@ -65,8 +65,8 @@
 					j=j+1
 		
-		if npy.size(x)>1:
+		if np.size(x)>1:
 			#keep the (x,y) contour arond
 			newcontour=OrderedDict()
-			newcontour['nods']=npy.size(x)
+			newcontour['nods']=np.size(x)
 			newcontour['density']=contour['density']
 			newcontour['x']=x
Index: /issm/trunk-jpl/src/py3/exp/expdisp.py
===================================================================
--- /issm/trunk-jpl/src/py3/exp/expdisp.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/exp/expdisp.py	(revision 21255)
@@ -1,4 +1,4 @@
 from expread import expread
-import numpy as npy
+import numpy as np
 
 def expdisp(domainoutline,ax,linestyle='--k',linewidth=1,unitmultiplier=1.):
Index: /issm/trunk-jpl/src/py3/extrusion/DepthAverage.py
===================================================================
--- /issm/trunk-jpl/src/py3/extrusion/DepthAverage.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/extrusion/DepthAverage.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from project2d import project2d
 
@@ -19,7 +19,7 @@
 
 	# coerce to array in case float is passed
-	if type(vector)!=npy.ndarray:
+	if type(vector)!=np.ndarray:
 		print('coercing array')
-		vector=npy.array(value)
+		vector=np.array(vector)
 
 	vec2d=False
@@ -30,5 +30,5 @@
 	#nods data
 	if vector.shape[0]==md.mesh.numberofvertices:
-		vector_average=npy.zeros(md.mesh.numberofvertices2d)
+		vector_average=np.zeros(md.mesh.numberofvertices2d)
 		for i in range(1,md.mesh.numberoflayers):
 			vector_average=vector_average+(project2d(md,vector,i)+project2d(md,vector,i+1))/2.*(project2d(md,md.mesh.z,i+1)-project2d(md,md.mesh.z,i))
@@ -37,5 +37,5 @@
 	#element data
 	elif vector.shape[0]==md.mesh.numberofelements:
-		vector_average=npy.zeros(md.mesh.numberofelements2d)
+		vector_average=np.zeros(md.mesh.numberofelements2d)
 		for i in range(1,md.mesh.numberoflayers):
 			vector_average=vector_average+project2d(md,vector,i)*(project2d(md,md.mesh.z,i+1)-project2d(md,md.mesh.z,i))
Index: /issm/trunk-jpl/src/py3/extrusion/project2d.py
===================================================================
--- /issm/trunk-jpl/src/py3/extrusion/project2d.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/extrusion/project2d.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 
 def project2d(md3d,value,layer):
@@ -25,7 +25,7 @@
 	
 	# coerce to array in case float is passed
-	if type(value)!=npy.ndarray:
+	if type(value)!=np.ndarray:
 		print('coercing array')
-		value=npy.array(value)
+		value=np.array(value)
 
 	vec2d=False
Index: /issm/trunk-jpl/src/py3/geometry/slope.py
===================================================================
--- /issm/trunk-jpl/src/py3/geometry/slope.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/geometry/slope.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from GetNodalFunctionsCoeff import  GetNodalFunctionsCoeff
 
@@ -33,14 +33,14 @@
 	alpha,beta=GetNodalFunctionsCoeff(index,x,y)[0:2]
 
-	summation=npy.array([[1],[1],[1]])
-	sx=npy.dot(surf[index-1]*alpha,summation).reshape(-1,)
-	sy=npy.dot(surf[index-1]*beta,summation).reshape(-1,)
+	summation=np.array([[1],[1],[1]])
+	sx=np.dot(surf[index-1]*alpha,summation).reshape(-1,)
+	sy=np.dot(surf[index-1]*beta,summation).reshape(-1,)
 
-	s=npy.sqrt(sx**2+sy**2)
+	s=np.sqrt(sx**2+sy**2)
 
 	if md.mesh.dimension()==3:
 		sx=project3d(md,'vector',sx,'type','element')
 		sy=project3d(md,'vector',sy,'type','element')
-		s=npy.sqrt(sx**2+sy**2)
+		s=np.sqrt(sx**2+sy**2)
 
 	return (sx,sy,s)
Index: /issm/trunk-jpl/src/py3/interp/SectionValues.py
===================================================================
--- /issm/trunk-jpl/src/py3/interp/SectionValues.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/interp/SectionValues.py	(revision 21255)
@@ -1,5 +1,5 @@
 import os
 from expread import expread
-import numpy as npy
+import numpy as np
 from project2d import project2d
 #from InterpFromMesh2d import InterpFromMesh2d
@@ -41,7 +41,7 @@
 
 	#initialization
-	X=npy.array([]) #X-coordinate
-	Y=npy.array([]) #Y-coordinate
-	S=npy.array([0.])  #curvilinear coordinate
+	X=np.array([]) #X-coordinate
+	Y=np.array([]) #Y-coordinate
+	S=np.array([0.])  #curvilinear coordinate
 	
 	for i in range(nods-1):
@@ -53,10 +53,10 @@
 		s_start=S[-1]
 	
-		length_segment=npy.sqrt((x_end-x_start)**2+(y_end-y_start)**2)
-		portion=npy.ceil(length_segment/res_h)
+		length_segment=np.sqrt((x_end-x_start)**2+(y_end-y_start)**2)
+		portion=np.ceil(length_segment/res_h)
 	
-		x_segment=npy.zeros(portion)
-		y_segment=npy.zeros(portion)
-		s_segment=npy.zeros(portion)
+		x_segment=np.zeros(portion)
+		y_segment=np.zeros(portion)
+		s_segment=np.zeros(portion)
 
 		for j in range(int(portion)):
@@ -66,10 +66,10 @@
 	
 		#plug into X and Y
-		X=npy.append(X,x_segment)
-		Y=npy.append(Y,y_segment)
-		S=npy.append(S,s_segment)
+		X=np.append(X,x_segment)
+		Y=np.append(Y,y_segment)
+		S=np.append(S,s_segment)
 
-	X=npy.append(X,x[nods-1])
-	Y=npy.append(Y,y[nods-1])
+	X=np.append(X,x[nods-1])
+	Y=np.append(Y,y[nods-1])
 	
 	#Number of nodes:
@@ -77,5 +77,5 @@
 	
 	#Compute Z
-	Z=npy.zeros(numberofnodes)
+	Z=np.zeros(numberofnodes)
 	
 	#New mesh and Data interpolation
@@ -88,5 +88,5 @@
 	
 		#Compute index
-		index=npy.array([list(range(1,numberofnodes)),list(range(2,numberofnodes+1))]).T
+		index=np.array([list(range(1,numberofnodes)),list(range(2,numberofnodes+1))]).T
 	
 	else:
@@ -102,5 +102,5 @@
 	
 		#Some useful parameters
-		layers=int(npy.ceil(npy.mean(md.geometry.thickness)/res_v))
+		layers=int(np.ceil(np.mean(md.geometry.thickness)/res_v))
 		nodesperlayer=int(numberofnodes)
 		nodestot=int(nodesperlayer*layers)
@@ -109,9 +109,9 @@
 	
 		#initialization
-		X3=npy.zeros(nodesperlayer*layers) 
-		Y3=npy.zeros(nodesperlayer*layers) 
-		Z3=npy.zeros(nodesperlayer*layers) 
-		S3=npy.zeros(nodesperlayer*layers) 
-		index3=npy.zeros((elementstot,4))
+		X3=np.zeros(nodesperlayer*layers) 
+		Y3=np.zeros(nodesperlayer*layers) 
+		Z3=np.zeros(nodesperlayer*layers) 
+		S3=np.zeros(nodesperlayer*layers) 
+		index3=np.zeros((elementstot,4))
 	
 		#Get new coordinates in 3d
@@ -123,9 +123,9 @@
 	
 			if i<layers-1:  #Build index3 with quads
-				ids=npy.vstack((npy.arange(i,nodestot-layers,layers),npy.arange(i+1,nodestot-layers,layers),npy.arange(i+layers+1,nodestot,layers),npy.arange(i+layers,nodestot,layers))).T
+				ids=np.vstack((np.arange(i,nodestot-layers,layers),np.arange(i+1,nodestot-layers,layers),np.arange(i+layers+1,nodestot,layers),np.arange(i+layers,nodestot,layers))).T
 				index3[(i-1)*elementsperlayer:i*elementsperlayer,:]=ids
 
 		#Interpolation of data on specified points
-		data_interp=InterpFromMeshToMesh3d(md.mesh.elements,md.mesh.x,md.mesh.y,md.mesh.z,data,X3,Y3,Z3,npy.nan)
+		data_interp=InterpFromMeshToMesh3d(md.mesh.elements,md.mesh.x,md.mesh.y,md.mesh.z,data,X3,Y3,Z3,np.nan)
 	
 		#build outputs
Index: /issm/trunk-jpl/src/py3/interp/averaging.py
===================================================================
--- /issm/trunk-jpl/src/py3/interp/averaging.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/interp/averaging.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from GetAreas import GetAreas
 from scipy.sparse import csc_matrix
@@ -36,8 +36,8 @@
 	#initialization
 	if layer==0:
-		weights=npy.zeros(md.mesh.numberofvertices,)
+		weights=np.zeros(md.mesh.numberofvertices,)
 		data=data.flatten(1)
 	else:
-		weights=npy.zeros(md.mesh.numberofvertices2d,)
+		weights=np.zeros(md.mesh.numberofvertices2d,)
 		data=data[(layer-1)*md.mesh.numberofvertices2d+1:layer*md.mesh.numberofvertices2d,:]
 	
@@ -66,14 +66,14 @@
 	index=index-1 # since python indexes starting from zero
 	line=index.flatten(1)
-	areas=npy.vstack(areas).reshape(-1,)
-	summation=1./rep*npy.ones(rep,)
+	areas=np.vstack(areas).reshape(-1,)
+	summation=1./rep*np.ones(rep,)
 	linesize=rep*numberofelements
 	
 	#update weights that holds the volume of all the element holding the node i
-	weights=csc_matrix( (npy.tile(areas,(rep,1)).reshape(-1,),(line,npy.zeros(linesize,))), shape=(numberofnodes,1))
+	weights=csc_matrix( (np.tile(areas,(rep,1)).reshape(-1,),(line,np.zeros(linesize,))), shape=(numberofnodes,1))
 	
 	#initialization
 	if len(data)==numberofelements:
-		average_node=csc_matrix( (npy.tile(areas*data,(rep,1)).reshape(-1,),(line,npy.zeros(linesize,))), shape=(numberofnodes,1))
+		average_node=csc_matrix( (np.tile(areas*data,(rep,1)).reshape(-1,),(line,np.zeros(linesize,))), shape=(numberofnodes,1))
 		average_node=average_node/weights
 		average_node = csc_matrix(average_node)
@@ -82,12 +82,12 @@
 
 	#loop over iteration
-	for i in npy.arange(1,iterations+1):
-		average_el=npy.asarray(npy.dot(average_node.todense()[index].reshape(numberofelements,rep),npy.vstack(summation))).reshape(-1,)
-		average_node=csc_matrix( (npy.tile(areas*average_el.reshape(-1),(rep,1)).reshape(-1,),(line,npy.zeros(linesize,))), shape=(numberofnodes,1))
+	for i in np.arange(1,iterations+1):
+		average_el=np.asarray(np.dot(average_node.todense()[index].reshape(numberofelements,rep),np.vstack(summation))).reshape(-1,)
+		average_node=csc_matrix( (np.tile(areas*average_el.reshape(-1),(rep,1)).reshape(-1,),(line,np.zeros(linesize,))), shape=(numberofnodes,1))
 		average_node=average_node/weights
 		average_node=csc_matrix(average_node)
 	
 	#return output as a full matrix (C code do not like sparse matrices)
-	average=npy.asarray(average_node.todense()).reshape(-1,)
+	average=np.asarray(average_node.todense()).reshape(-1,)
 
 	return average
Index: /issm/trunk-jpl/src/py3/interp/holefiller.py
===================================================================
--- /issm/trunk-jpl/src/py3/interp/holefiller.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/interp/holefiller.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from scipy.spatial import cKDTree
 
@@ -30,6 +30,6 @@
 	filled=data
 	
-	XYGood=npy.dstack([x[goodids],y[goodids]])[0]
-	XYBad=npy.dstack([x[badids],y[badids]])[0]
+	XYGood=np.dstack([x[goodids],y[goodids]])[0]
+	XYBad=np.dstack([x[badids],y[badids]])[0]
 	tree=cKDTree(XYGood)
 	nearest=tree.query(XYBad,k=knn)[1]
@@ -42,5 +42,5 @@
 			for j in range(knn):
 				neardat.append(filled[goodids][nearest[i][j]])
-				filled[badids[i]]=npy.mean(neardat)
+				filled[badids[i]]=np.mean(neardat)
 				
 	return filled
Index: /issm/trunk-jpl/src/py3/interp/interp.py
===================================================================
--- /issm/trunk-jpl/src/py3/interp/interp.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/interp/interp.py	(revision 21255)
@@ -1,4 +1,4 @@
 # module for inperpolating/smoothing data
-import numpy as npy
+import numpy as np
 from scipy.interpolate import CloughTocher2DInterpolator, Rbf
 from scipy.spatial import cKDTree
@@ -51,19 +51,19 @@
 	xlim=[min(xi)-dx,max(xi)+dx]
 	ylim=[min(yi)-dy,max(yi)+dy]
-	xflag=npy.logical_and(x>xlim[0],x<xlim[1])
-	yflag=npy.logical_and(y>ylim[0],y<ylim[1])
-	bothind=npy.nonzero(npy.logical_and(xflag,yflag))
+	xflag=np.logical_and(x>xlim[0],x<xlim[1])
+	yflag=np.logical_and(y>ylim[0],y<ylim[1])
+	bothind=np.nonzero(np.logical_and(xflag,yflag))
 	subdata=data[bothind]
 	subx=x[bothind]
 	suby=y[bothind]
-	points=npy.array([subx,suby]).T
+	points=np.array([subx,suby]).T
 
 	# mask out any nan's in the data and corresponding coordinate points
-	mask=npy.isnan(subdata)
-	ind=npy.nonzero(mask)[0]
+	mask=np.isnan(subdata)
+	ind=np.nonzero(mask)[0]
 	if len(ind) and fill_nans:
 		print("		WARNING: filling nans using spline fit through good data points, which may or may not be appropriate. Check results carefully.")
-	subdata=npy.delete(subdata,ind)
-	points=npy.delete(points,ind,axis=0)
+	subdata=np.delete(subdata,ind)
+	points=np.delete(points,ind,axis=0)
 
 	if maxiter:
@@ -76,15 +76,15 @@
 	if not fill_nans:
 		# identify nan's in xi,yi using nearest neighbors
-		xyinterp=npy.dstack([xi,yi])[0]
-		xg,yg=npy.meshgrid(subx,suby)
-		xydata=npy.dstack([subx,suby])[0]
+		xyinterp=np.dstack([xi,yi])[0]
+		xg,yg=np.meshgrid(subx,suby)
+		xydata=np.dstack([subx,suby])[0]
 		tree=cKDTree(xydata)
 		nearest=tree.query(xyinterp)[1]
-		pos=npy.nonzero(npy.isnan(subdata[nearest]))
+		pos=np.nonzero(np.isnan(subdata[nearest]))
 		interpdata[pos]=subdata[nearest][pos]
 
 	return interpdata
 #}}}
-def GridSplineToMesh2d(x,y,data,xi,yi,default_value=npy.nan,plotonly=False,fill_nans=False):#{{{
+def GridSplineToMesh2d(x,y,data,xi,yi,default_value=np.nan,plotonly=False,fill_nans=False):#{{{
 	'''
 	python analog to InterpFromGridToMesh.  This routine uses
@@ -108,5 +108,5 @@
 
 	Usage:
-		interpdata=GridToMesh(x,y,data,xi,yi,default_value=npy.nan,plotonly=False,fill_nans=False)
+		interpdata=GridToMesh(x,y,data,xi,yi,default_value=np.nan,plotonly=False,fill_nans=False)
 
 	Examples:
@@ -114,7 +114,7 @@
 	'''
 
-	if npy.ndim(x)==2:
+	if np.ndim(x)==2:
 		x=x.reshape(-1,)
-	if npy.ndim(y)==2:
+	if np.ndim(y)==2:
 		y=y.reshape(-1,)
 	if len(x) != data.shape[1]+1 and len(x) != data.shape[1]:
@@ -134,34 +134,34 @@
 	if len(x)==data.shape[1] and len(y)==data.shape[0]:
 		print('		x,y taken to define the center of data grid cells')
-		xind=npy.nonzero(npy.logical_and(x>xlim[0],x<xlim[1]))[0]
-		yind=npy.nonzero(npy.logical_and(y>ylim[0],y<ylim[1]))[0]
-		xg,yg=npy.meshgrid(x[xind],y[yind])
+		xind=np.nonzero(np.logical_and(x>xlim[0],x<xlim[1]))[0]
+		yind=np.nonzero(np.logical_and(y>ylim[0],y<ylim[1]))[0]
+		xg,yg=np.meshgrid(x[xind],y[yind])
 		subdata=data[yind[0]:yind[-1]+1,xind[0]:xind[-1]+1]
 	elif len(x)==data.shape[1]+1 and len(y)==data.shape[0]+1:
 		print('		x,y taken to define the corners of data grid cells')
-		xcenter=npy.fromiter(((x[i]+x[i+1])/2 for i in range(len(x)-1)),npy.float)
-		ycenter=npy.fromiter(((y[i]+y[i+1])/2 for i in range(len(y)-1)),npy.float)
-		xind=npy.nonzero(npy.logical_and(xcenter>xlim[0],xcenter<xlim[1]))[0]
-		yind=npy.nonzero(npy.logical_and(ycenter>ylim[0],ycenter<ylim[1]))[0]
-		xg,yg=npy.meshgrid(xcenter[xind],ycenter[yind])
+		xcenter=np.fromiter(((x[i]+x[i+1])/2 for i in range(len(x)-1)),np.float)
+		ycenter=np.fromiter(((y[i]+y[i+1])/2 for i in range(len(y)-1)),np.float)
+		xind=np.nonzero(np.logical_and(xcenter>xlim[0],xcenter<xlim[1]))[0]
+		yind=np.nonzero(np.logical_and(ycenter>ylim[0],ycenter<ylim[1]))[0]
+		xg,yg=np.meshgrid(xcenter[xind],ycenter[yind])
 		subdata=data[yind[0]:yind[-1]+1,xind[0]:xind[-1]+1]
 	else:
 		raise ValueError('x and y have inconsistent sizes: both should have length ncols(data)/nrows(data) or ncols(data)+1/nrows(data)+1')
 
-	points=npy.array([xg.ravel(),yg.ravel()]).T
+	points=np.array([xg.ravel(),yg.ravel()]).T
 	flatsubdata=subdata.ravel()
 
 	if plotonly:
-		plt.imshow(npy.flipud(subdata),origin='upper')
+		plt.imshow(np.flipud(subdata),origin='upper')
 		plt.show()
 		return
 
 	# mask out any nan's in the data and corresponding coordinate points
-	mask=npy.isnan(flatsubdata)
-	ind=npy.nonzero(mask)[0]
+	mask=np.isnan(flatsubdata)
+	ind=np.nonzero(mask)[0]
 	if len(ind) and fill_nans:
 		print("		WARNING: filling nans using spline fit through good data points, which may or may not be appropriate. Check results carefully.")
-	goodsubdata=npy.delete(flatsubdata,ind)
-	goodpoints=npy.delete(points,ind,axis=0)
+	goodsubdata=np.delete(flatsubdata,ind)
+	goodpoints=np.delete(points,ind,axis=0)
 
 	# create spline and index spline at mesh points
@@ -171,9 +171,9 @@
 	if not fill_nans:
 		# identify nan's in xi,yi using nearest neighbors
-		xyinterp=npy.dstack([xi,yi])[0]
-		xydata=npy.dstack([xg.ravel(),yg.ravel()])[0]
+		xyinterp=np.dstack([xi,yi])[0]
+		xydata=np.dstack([xg.ravel(),yg.ravel()])[0]
 		tree=cKDTree(xydata)
 		nearest=tree.query(xyinterp)[1]
-		pos=npy.nonzero(npy.isnan(flatsubdata[nearest]))
+		pos=np.nonzero(np.isnan(flatsubdata[nearest]))
 		interpdata[pos]=flatsubdata[nearest][pos]
 
Index: /issm/trunk-jpl/src/py3/inversions/parametercontroldrag.py
===================================================================
--- /issm/trunk-jpl/src/py3/inversions/parametercontroldrag.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/inversions/parametercontroldrag.py	(revision 21255)
@@ -26,6 +26,6 @@
 
 	#weights
-	weights=options.getfieldvalue('weights',npy.ones(md.mesh.numberofvertices))
-	if npy.size(weights)!=md.mesh.numberofvertices:
+	weights=options.getfieldvalue('weights',np.ones(md.mesh.numberofvertices))
+	if np.size(weights)!=md.mesh.numberofvertices:
 		md.inversion.cost_functions_coefficients=ones(md.mesh.numberofvertices)
 	else:
@@ -34,5 +34,5 @@
 	#nsteps
 	nsteps=options.getfieldvalue('nsteps',100);
-	if (npy.size(nsteps)!=1) | (nsteps<=0) | (floor(nsteps)!=nsteps):
+	if (np.size(nsteps)!=1) | (nsteps<=0) | (floor(nsteps)!=nsteps):
 		md.inversion.nsteps=100
 	else:
@@ -41,7 +41,7 @@
 	#cm_min
 	cm_min=options.getfieldvalue('cm_min',ones(md.mesh.numberofvertices))
-	if (npy.size(cm_min)==1):
+	if (np.size(cm_min)==1):
 		md.inversion.min_parameters=cm_min*ones(md.mesh.numberofvertices)
-	elif (npy.size(cm_min)==md.mesh.numberofvertices):
+	elif (np.size(cm_min)==md.mesh.numberofvertices):
 		md.inversion.min_parameters=cm_min
 	else:
@@ -50,7 +50,7 @@
 	#cm_max
 	cm_max=options.getfieldvalue('cm_max',250*ones(md.mesh.numberofvertices))
-	if (npy.size(cm_max)==1):
+	if (np.size(cm_max)==1):
 		md.inversion.max_parameters=cm_max*ones(md.mesh.numberofvertices)
-	elif (npy.size(cm_max)==md.mesh.numberofvertices):
+	elif (np.size(cm_max)==md.mesh.numberofvertices):
 		md.inversion.max_parameters=cm_max
 	else:
@@ -59,5 +59,5 @@
 	#eps_cm
 	eps_cm=optoins.getfieldvalue('eps_cm',float('nan'))
-	if (npy.size(eps_cm)~=1 | eps_cm<0 ):
+	if ((np.size(eps_cm)!=1) | (eps_cm<0) ):
 		md.inversion.cost_function_threshold=float('nan')
 	else:
@@ -66,5 +66,5 @@
 	#maxiter
 	maxiter=options.getfieldvalue('maxiter',10*ones(md.inversion.nsteps))
-	if (npy.any(maxiter<0) | npy.any(floor(maxiter)~=maxiter)):
+	if (np.any(maxiter<0) | np.any(floor(maxiter)!=maxiter)):
 		md.inversion.maxiter_per_step=10*ones(md.inversion.nsteps)
 	else:
@@ -75,5 +75,5 @@
 	#cm_jump
 	cm_jump=options.getfieldvalue('cm_jump',0.8*ones(md.inversion.nsteps))
-	if !npy.isreal(cm_jump):
+	if not np.isreal(cm_jump):
 		md.inversion.step_threshold=0.8*ones(md.inversion.nsteps)
 	else:
Index: /issm/trunk-jpl/src/py3/materials/DepthAvgTempCond.py
===================================================================
--- /issm/trunk-jpl/src/py3/materials/DepthAvgTempCond.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/materials/DepthAvgTempCond.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from TMeltingPoint  import TMeltingPoint
 
@@ -16,17 +16,17 @@
    alpha=G*H/k
 
-   Tbar=npy.zeros(md.mesh.numberofvertices,)
+   Tbar=np.zeros(md.mesh.numberofvertices,)
 
    #find temperature average when we are below melting point: 
-   pos=npy.nonzero( Ts+alpha < Tpmp)
+   pos=np.nonzero( Ts+alpha < Tpmp)
    if pos:
 	   Tbar[pos]=Ts[pos]+alpha[pos]/2 
 
-   pos=npy.nonzero( Ts+alpha>= Tpmp)
+   pos=np.nonzero( Ts+alpha>= Tpmp)
    if pos:
 	   Tbar[pos]=Tpmp+(Tpmp**2-Ts[pos]**2)/2/alpha[pos]+ Tpmp*(Ts[pos]-Tpmp)/alpha[pos]
    
    #on ice shelf, easier: 
-   pos=npy.nonzero(md.mask.groundedice_levelset[0]<=0)
+   pos=np.nonzero(md.mask.groundedice_levelset[0]<=0)
    if pos:
 	   Tbar[pos]=(Ts[pos]+Tpmp)/2
Index: /issm/trunk-jpl/src/py3/materials/TMeltingPoint.py
===================================================================
--- /issm/trunk-jpl/src/py3/materials/TMeltingPoint.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/materials/TMeltingPoint.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 
 def TMeltingPoint(reftemp,pressure):
@@ -17,5 +17,5 @@
 
 	#ensure ref is same dimension as pressure
-	ref=reftemp*npy.ones_like(pressure)
+	ref=reftemp*np.ones_like(pressure)
 
 	return reftemp-beta*pressure
Index: /issm/trunk-jpl/src/py3/mech/analyticaldamage.py
===================================================================
--- /issm/trunk-jpl/src/py3/mech/analyticaldamage.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/mech/analyticaldamage.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from averaging import averaging
 #from plotmodel import plotmodel
@@ -54,5 +54,5 @@
 
 	if isinstance(sigmab,(int,float)):
-		sigmab=sigmab*npy.ones((md.mesh.numberofvertices,))
+		sigmab=sigmab*np.ones((md.mesh.numberofvertices,))
 
 	# check inputs
@@ -61,5 +61,5 @@
 	if not '2d' in md.mesh.__doc__:
 		raise Exception('only 2d (planview) model supported currently')
-	if npy.any(md.flowequation.element_equation!=2):
+	if np.any(md.flowequation.element_equation!=2):
 		print('Warning: the model has some non SSA elements. These will be treated like SSA elements')
 
@@ -76,28 +76,28 @@
 	n=averaging(md,md.materials.rheology_n,0)
 	
-	D=1.-(1.+a+a**2+b**2)**((n-1.)/(2.*n))/npy.abs(ex)**(1./n)*(T-sigmab)/B/(2.+a)/npy.sign(ex)
+	D=1.-(1.+a+a**2+b**2)**((n-1.)/(2.*n))/np.abs(ex)**(1./n)*(T-sigmab)/B/(2.+a)/np.sign(ex)
 	
 	# D>1 where (2+a).*sign(ex)<0, compressive regions where high backstress needed
-	pos=npy.nonzero(D>1)
+	pos=np.nonzero(D>1)
 	D[pos]=0
 	
-	backstress=npy.zeros((md.mesh.numberofvertices,))
+	backstress=np.zeros((md.mesh.numberofvertices,))
 
 	# backstress to bring D down to one 
-	backstress[pos]=T[pos]-(1.-D[pos])*B[pos]*npy.sign(ex[pos])*(2.+a[pos])*npy.abs(ex[pos])**(1./n[pos])/(1.+a[pos]+a[pos]**2)**((n[pos]-1.)/2./n[pos])
+	backstress[pos]=T[pos]-(1.-D[pos])*B[pos]*np.sign(ex[pos])*(2.+a[pos])*np.abs(ex[pos])**(1./n[pos])/(1.+a[pos]+a[pos]**2)**((n[pos]-1.)/2./n[pos])
 	
-	pos=npy.nonzero(D<0)
+	pos=np.nonzero(D<0)
 	#mask=ismember(1:md.mesh.numberofvertices,pos);
 	D[pos]=0
 	
 	# backstress to bring negative damage to zero
-	backstress[pos]=T[pos]-(1.-D[pos])*B[pos]*npy.sign(ex[pos])*(2.+a[pos])*npy.abs(ex[pos])**(1./n[pos])/(1.+a[pos]+a[pos]**2)**((n[pos]-1.)/2./n[pos])
+	backstress[pos]=T[pos]-(1.-D[pos])*B[pos]*np.sign(ex[pos])*(2.+a[pos])*np.abs(ex[pos])**(1./n[pos])/(1.+a[pos]+a[pos]**2)**((n[pos]-1.)/2./n[pos])
 	
-	pos=npy.nonzero(backstress<0)
+	pos=np.nonzero(backstress<0)
 	backstress[pos]=0
 	
 	# rigidity from Thomas relation for D=0 and backstress=0
-	B=npy.sign(ex)/(2.+a)*(1.+a+a**2)**((n-1.)/2./n)*T/(npy.abs(ex)**(1./n))
-	pos=npy.nonzero(B<0)
+	B=np.sign(ex)/(2.+a)*(1.+a+a**2)**((n-1.)/2./n)*T/(np.abs(ex)**(1./n))
+	pos=np.nonzero(B<0)
 	B[pos]=md.materials.rheology_B[pos]
 	
Index: /issm/trunk-jpl/src/py3/mech/backstressfrominversion.py
===================================================================
--- /issm/trunk-jpl/src/py3/mech/backstressfrominversion.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/mech/backstressfrominversion.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from averaging import averaging
 from thomasparams import thomasparams
@@ -65,10 +65,10 @@
 	if tempmask:
 		Bi=md.results.StressbalanceSolution.MaterialsRheologyBbar
-		pos=npy.nonzero(Bi>md.materials.rheology_B)
+		pos=np.nonzero(Bi>md.materials.rheology_B)
 		Bi[pos]=md.materials.rheology_B[pos]
 	
 	# analytical backstress solution
-	backstress=T-Bi*npy.sign(ex0)*(2+a0)*npy.abs(ex0)**(1./n)/((1+a0+a0**2+b0**2)**((n-1.)/2./n))
-	backstress[npy.nonzero(backstress<0)]=0
+	backstress=T-Bi*np.sign(ex0)*(2+a0)*np.abs(ex0)**(1./n)/((1+a0+a0**2+b0**2)**((n-1.)/2./n))
+	backstress[np.nonzero(backstress<0)]=0
 
 	return backstress
Index: /issm/trunk-jpl/src/py3/mech/calcbackstress.py
===================================================================
--- /issm/trunk-jpl/src/py3/mech/calcbackstress.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/mech/calcbackstress.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from averaging import averaging
 from thomasparams import thomasparams
@@ -61,6 +61,6 @@
 	
 	# analytical backstress solution
-	backstress=T-(1.-D)*B*npy.sign(ex0)*(2+a0)*npy.abs(ex0)**(1./n)/((1+a0+a0**2+b0**2)**((n-1.)/2./n))
-	backstress[npy.nonzero(backstress<0)]=0
+	backstress=T-(1.-D)*B*np.sign(ex0)*(2+a0)*np.abs(ex0)**(1./n)/((1+a0+a0**2+b0**2)**((n-1.)/2./n))
+	backstress[np.nonzero(backstress<0)]=0
 
 	return backstress
Index: /issm/trunk-jpl/src/py3/mech/damagefrominversion.py
===================================================================
--- /issm/trunk-jpl/src/py3/mech/damagefrominversion.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/mech/damagefrominversion.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 
 def damagefrominversion(md):
@@ -24,20 +24,20 @@
 	if any(md.flowequation.element_equation!=2):
 		raise Exception('Warning: the model has some non-SSA elements.  These will be treated like SSA elements')
-	if npy.ndim(md.results.StressbalanceSolution.MaterialsRheologyBbar)==2:
+	if np.ndim(md.results.StressbalanceSolution.MaterialsRheologyBbar)==2:
 		Bi=md.results.StressbalanceSolution.MaterialsRheologyBbar.reshape(-1,)
 	else:
 		Bi=md.results.StressbalanceSolution.MaterialsRheologyBbar
-	if npy.ndim(md.materials.rheology_B)==2:
+	if np.ndim(md.materials.rheology_B)==2:
 		BT=md.materials.rheology_B.reshape(-1,)
 	else:
 		BT=md.materials.rheology_B
 
-	damage=npy.zeros_like(Bi)
+	damage=np.zeros_like(Bi)
 
 	# Damage where Bi softer than B(T)
-	pos=npy.nonzero(Bi<BT)[0]
+	pos=np.nonzero(Bi<BT)[0]
 	damage[pos]=1.-Bi[pos]/BT[pos]
 	
-	pos=npy.nonzero(damage<0)
+	pos=np.nonzero(damage<0)
 	damage[pos]=0
 
Index: /issm/trunk-jpl/src/py3/mech/mechanicalproperties.py
===================================================================
--- /issm/trunk-jpl/src/py3/mech/mechanicalproperties.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/mech/mechanicalproperties.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from GetNodalFunctionsCoeff import GetNodalFunctionsCoeff
 from results import results
@@ -27,5 +27,5 @@
 	#	raise StandardError('only 2D model supported currently')
 
-	if npy.any(md.flowequation.element_equation!=2):
+	if np.any(md.flowequation.element_equation!=2):
 		print('Warning: the model has some non SSA elements. These will be treated like SSA elements')
 
@@ -35,11 +35,11 @@
             if len(damage)!=md.mesh.numberofvertices:
 		raise ValueError('if damage is supplied it should be of size ' + md.mesh.numberofvertices)
-            if npy.ndim(damage)==2:
+            if np.ndim(damage)==2:
                 damage=damage.reshape(-1,)
         else: damage=None
 
-	if npy.ndim(vx)==2:
+	if np.ndim(vx)==2:
 		vx=vx.reshape(-1,)
-	if npy.ndim(vy)==2:
+	if np.ndim(vy)==2:
 		vy=vy.reshape(-1,)
 	
@@ -48,9 +48,9 @@
 	numberofvertices=md.mesh.numberofvertices
 	index=md.mesh.elements
-	summation=npy.array([[1],[1],[1]])
-	directionsstress=npy.zeros((numberofelements,4))
-	directionsstrain=npy.zeros((numberofelements,4))
-	valuesstress=npy.zeros((numberofelements,2))
-	valuesstrain=npy.zeros((numberofelements,2))
+	summation=np.array([[1],[1],[1]])
+	directionsstress=np.zeros((numberofelements,4))
+	directionsstrain=np.zeros((numberofelements,4))
+	valuesstress=np.zeros((numberofelements,2))
+	valuesstrain=np.zeros((numberofelements,2))
 	
 	#compute nodal functions coefficients N(x,y)=alpha x + beta y +gamma
@@ -60,34 +60,34 @@
 	vxlist=vx[index-1]/md.constants.yts
 	vylist=vy[index-1]/md.constants.yts
-	ux=npy.dot((vxlist*alpha),summation).reshape(-1,)
-	uy=npy.dot((vxlist*beta),summation).reshape(-1,)
-	vx=npy.dot((vylist*alpha),summation).reshape(-1,)
-	vy=npy.dot((vylist*beta),summation).reshape(-1,)
+	ux=np.dot((vxlist*alpha),summation).reshape(-1,)
+	uy=np.dot((vxlist*beta),summation).reshape(-1,)
+	vx=np.dot((vylist*alpha),summation).reshape(-1,)
+	vy=np.dot((vylist*beta),summation).reshape(-1,)
 	uyvx=(vx+uy)/2.
 	#clear vxlist vylist
 	
 	#compute viscosity
-	nu=npy.zeros((numberofelements,))
-	B_bar=npy.dot(md.materials.rheology_B[index-1],summation/3.).reshape(-1,)
+	nu=np.zeros((numberofelements,))
+	B_bar=np.dot(md.materials.rheology_B[index-1],summation/3.).reshape(-1,)
 	power=((md.materials.rheology_n-1.)/(2.*md.materials.rheology_n)).reshape(-1,)
 	second_inv=(ux**2.+vy**2.+((uy+vx)**2.)/4.+ux*vy).reshape(-1,)
 	
 	#some corrections
-	location=npy.nonzero(npy.logical_and(second_inv==0,power!=0))
+	location=np.nonzero(np.logical_and(second_inv==0,power!=0))
 	nu[location]=10^18 	#arbitrary maximum viscosity to apply where there is no effective shear
 	
 	if 'matice' in md.materials.__module__:
-		location=npy.nonzero(second_inv)
+		location=np.nonzero(second_inv)
 		nu[location]=B_bar[location]/(second_inv[location]**power[location])
-		location=npy.nonzero(npy.logical_and(second_inv==0,power==0))
+		location=np.nonzero(np.logical_and(second_inv==0,power==0))
 		nu[location]=B_bar[location]
-		location=npy.nonzero(npy.logical_and(second_inv==0,power!=0))
+		location=np.nonzero(np.logical_and(second_inv==0,power!=0))
 		nu[location]=10^18
 	elif 'matdamageice' in md.materials.__module__ and damage is not None:
 		print('computing damage-dependent properties!')
-		Zinv=npy.dot(1-damage[index-1],summation/3.).reshape(-1,)
-		location=npy.nonzero(second_inv)
-		nu[location]=Zinv[location]*B_bar[location]/npy.power(second_inv[location],power[location])
-		location=npy.nonzero(npy.logical_and(second_inv==0,power==0))
+		Zinv=np.dot(1-damage[index-1],summation/3.).reshape(-1,)
+		location=np.nonzero(second_inv)
+		nu[location]=Zinv[location]*B_bar[location]/np.power(second_inv[location],power[location])
+		location=np.nonzero(np.logical_and(second_inv==0,power==0))
 		nu[location]=Zinv[location]*B_bar[location]
 		#clear Zinv
@@ -101,12 +101,12 @@
 	
 	#compute principal properties of stress
-	for i in npy.arange(numberofelements):
+	for i in np.arange(numberofelements):
 	
 		#compute stress and strainrate matrices
-		stress=npy.array([ [tau_xx[i], tau_xy[i]], [tau_xy[i], tau_yy[i]] ])
-		strain=npy.array([ [ux[i], uyvx[i]], [uyvx[i], vy[i]] ])
+		stress=np.array([ [tau_xx[i], tau_xy[i]], [tau_xy[i], tau_yy[i]] ])
+		strain=np.array([ [ux[i], uyvx[i]], [uyvx[i], vy[i]] ])
 	
 		#eigenvalues and vectors for stress
-		value,directions=npy.linalg.eig(stress);
+		value,directions=np.linalg.eig(stress);
 		idx=abs(value).argsort()[::-1] # sort in descending order
 		value=value[idx]
@@ -116,5 +116,5 @@
 
 		#eigenvalues and vectors for strain
-		value,directions=npy.linalg.eig(strain);
+		value,directions=np.linalg.eig(strain);
 		idx=abs(value).argsort()[::-1] # sort in descending order
 		value=value[idx]
@@ -133,5 +133,5 @@
 	stress.principalvalue2=valuesstress[:,1]
 	stress.principalaxis2=directionsstress[:,2:4]
-	stress.effectivevalue=1./npy.sqrt(2.)*npy.sqrt(stress.xx**2+stress.yy**2+2.*stress.xy**2)
+	stress.effectivevalue=1./np.sqrt(2.)*np.sqrt(stress.xx**2+stress.yy**2+2.*stress.xy**2)
 	md.results.stress=stress
 	
@@ -144,5 +144,5 @@
 	strainrate.principalvalue2=valuesstrain[:,1]*md.constants.yts 
 	strainrate.principalaxis2=directionsstrain[:,2:4]
-	strainrate.effectivevalue=1./npy.sqrt(2.)*npy.sqrt(strainrate.xx**2+strainrate.yy**2+2.*strainrate.xy**2)
+	strainrate.effectivevalue=1./np.sqrt(2.)*np.sqrt(strainrate.xx**2+strainrate.yy**2+2.*strainrate.xy**2)
 	md.results.strainrate=strainrate
 	
@@ -155,5 +155,5 @@
 	deviatoricstress.principalvalue2=valuesstress[:,1]
 	deviatoricstress.principalaxis2=directionsstress[:,2:4]
-	deviatoricstress.effectivevalue=1./npy.sqrt(2.)*npy.sqrt(stress.xx**2+stress.yy**2+2.*stress.xy**2)
+	deviatoricstress.effectivevalue=1./np.sqrt(2.)*np.sqrt(stress.xx**2+stress.yy**2+2.*stress.xy**2)
 	md.results.deviatoricstress=deviatoricstress
 
Index: /issm/trunk-jpl/src/py3/mech/robintemperature.py
===================================================================
--- /issm/trunk-jpl/src/py3/mech/robintemperature.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/mech/robintemperature.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from scipy.special import erf
 
@@ -34,7 +34,7 @@
 	
 	#create vertical coordinate variable
-	zstar=npy.sqrt(2.*alphaT*thickness/accumrate)
+	zstar=np.sqrt(2.*alphaT*thickness/accumrate)
 	
-	tprofile=surftemp+npy.sqrt(2.*thickness*npy.pi/accumrate/alphaT)*(-heatflux)/2./rho/c*(erf(z/zstar)-erf(thickness/zstar))
+	tprofile=surftemp+np.sqrt(2.*thickness*np.pi/accumrate/alphaT)*(-heatflux)/2./rho/c*(erf(z/zstar)-erf(thickness/zstar))
 
 	return tprofile	
Index: /issm/trunk-jpl/src/py3/mech/steadystateiceshelftemp.py
===================================================================
--- /issm/trunk-jpl/src/py3/mech/steadystateiceshelftemp.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/mech/steadystateiceshelftemp.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 
 def steadystateiceshelftemp(md,surfacetemp,basaltemp):
@@ -42,20 +42,20 @@
 	temperature=(Ts+Tb)/2  # where wi~=0
 	
-	pos=npy.nonzero(abs(wi)>=1e-4) # to avoid division by zero
+	pos=np.nonzero(abs(wi)>=1e-4) # to avoid division by zero
 
-	npy.seterr(over='raise',divide='raise') # raise errors if floating point exceptions are encountered in following calculation
+	np.seterr(over='raise',divide='raise') # raise errors if floating point exceptions are encountered in following calculation
 	#calculate depth-averaged temperature (in Celsius)
 	try:
-		temperature[pos]=-( (Tb[pos]-Ts[pos])*ki/wi[pos] + Hi[pos]*Tb[pos] - (Hi[pos]*Ts[pos] + (Tb[pos]-Ts[pos])*ki/wi[pos])*npy.exp(Hi[pos]*wi[pos]/ki) )/( Hi[pos]*(npy.exp(Hi[pos]*wi[pos]/ki)-1))
+		temperature[pos]=-( (Tb[pos]-Ts[pos])*ki/wi[pos] + Hi[pos]*Tb[pos] - (Hi[pos]*Ts[pos] + (Tb[pos]-Ts[pos])*ki/wi[pos])*np.exp(Hi[pos]*wi[pos]/ki) )/( Hi[pos]*(np.exp(Hi[pos]*wi[pos]/ki)-1))
 	except FloatingPointError:
 		print('steadystateiceshelf warning: overflow encountered in multipy/divide/exp, trying another formulation.') 
-		temperature[pos]=-( ((Tb[pos]-Ts[pos])*ki/wi[pos] + Hi[pos]*Tb[pos])/npy.exp(Hi[pos]*wi[pos]/ki) - Hi[pos]*Ts[pos] + (Tb[pos]-Ts[pos])*ki/wi[pos])/( Hi[pos]*(1-npy.exp(-Hi[pos]*wi[pos]/ki)))
+		temperature[pos]=-( ((Tb[pos]-Ts[pos])*ki/wi[pos] + Hi[pos]*Tb[pos])/np.exp(Hi[pos]*wi[pos]/ki) - Hi[pos]*Ts[pos] + (Tb[pos]-Ts[pos])*ki/wi[pos])/( Hi[pos]*(1-np.exp(-Hi[pos]*wi[pos]/ki)))
 	
 	#temperature should not be less than surface temp
-	pos=npy.nonzero(temperature<Ts)
+	pos=np.nonzero(temperature<Ts)
 	temperature[pos]=Ts[pos]
 	
 	# NaN where melt rates are too high (infinity/infinity in exponential)
-	pos=npy.nonzero(npy.isnan(temperature))
+	pos=np.nonzero(np.isnan(temperature))
 	temperature[pos]=Ts[pos]
 	
Index: /issm/trunk-jpl/src/py3/mech/thomasparams.py
===================================================================
--- /issm/trunk-jpl/src/py3/mech/thomasparams.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/mech/thomasparams.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from averaging import averaging
 
@@ -75,10 +75,10 @@
 	
 	# checks: any of e1 or e2 equal to zero?
-	pos=npy.nonzero(e1==0)
-	if npy.any(pos==1):
+	pos=np.nonzero(e1==0)
+	if np.any(pos==1):
 		print('WARNING: first principal strain rate equal to zero.  Value set to 1e-13 s^-1')
 		e1[pos]=1.e-13
-	pos=npy.nonzero(e2==0)
-	if npy.any(pos==1):
+	pos=np.nonzero(e2==0)
+	if np.any(pos==1):
 		print('WARNING: second principal strain rate equal to zero.  Value set to 1e-13 s^-1')
 		e2[pos]=1.e-13
@@ -89,19 +89,19 @@
 	
 	if coordsys=='principal':
-		b=npy.zeros((md.mesh.numberofvertices,))
+		b=np.zeros((md.mesh.numberofvertices,))
 		ex=e1
 		a=e2/e1
-		pos=npy.nonzero(npy.logical_and(e1<0,e2>0)) # longitudinal compression and lateral tension
+		pos=np.nonzero(np.logical_and(e1<0,e2>0)) # longitudinal compression and lateral tension
 		a[pos]=e1[pos]/e2[pos]
 		ex[pos]=e2[pos]
-		pos2=npy.nonzero(e1<0 & e2<0 & npy.abs(e1)<npy.abs(e2)) # lateral and longitudinal compression
+		pos2=np.nonzero(e1<0 & e2<0 & np.abs(e1)<np.abs(e2)) # lateral and longitudinal compression
 		a[pos2]=e1[pos2]/e2[pos2]
 		ex[pos2]=e2[pos2]
-		pos3=npy.nonzero(e1>0 & e2>0 & npy.abs(e1)<npy.abs(e2)) # lateral and longitudinal tension
+		pos3=np.nonzero(e1>0 & e2>0 & np.abs(e1)<np.abs(e2)) # lateral and longitudinal tension
 		a[pos3]=e1[pos3]/e2[pos3]
 		ex[pos3]=e2[pos3]
-		ind=npy.nonzero(e1<0 & e2<0)
+		ind=np.nonzero(e1<0 & e2<0)
 		a[ind]=-a[ind] # where both strain rates are compressive, enforce negative alpha
-		sigxx=(npy.abs(ex)/((1.+a+a**2)**((n-1.)/2.)))**(1./n)*B
+		sigxx=(np.abs(ex)/((1.+a+a**2)**((n-1.)/2.)))**(1./n)*B
 	elif coordsys=='xy':
 		ex=exx
@@ -110,10 +110,10 @@
 	elif coordsys=='longitudinal':
 		# using longitudinal strain rates defined by observed velocity vector
-		velangle=npy.arctan(md.initialization.vy/md.initialization.vx)
-		pos=npy.nonzero(md.initialization.vx==0)
-		velangle[pos]=npy.pi/2
-		ex=0.5*(exx+eyy)+0.5*(exx-eyy)*npy.cos(2.*velangle)+exy*npy.sin(2.*velangle)
+		velangle=np.arctan(md.initialization.vy/md.initialization.vx)
+		pos=np.nonzero(md.initialization.vx==0)
+		velangle[pos]=np.pi/2
+		ex=0.5*(exx+eyy)+0.5*(exx-eyy)*np.cos(2.*velangle)+exy*np.sin(2.*velangle)
 		ey=exx+eyy-ex # trace of strain rate tensor is invariant
-		exy=-0.5*(exx-eyy)*npy.sin(2.*velangle)+exy*npy.cos(2.*velangle)
+		exy=-0.5*(exx-eyy)*np.sin(2.*velangle)+exy*np.cos(2.*velangle)
 		a=ey/ex
 		b=exy/ex
@@ -124,5 +124,5 @@
 	# a < -1 in areas of strong lateral compression or longitudinal compression and
 	# theta flips sign at a = -2
-	pos=npy.nonzero(npy.abs((npy.abs(a)-2.))<1.e-3)
+	pos=np.nonzero(np.abs((np.abs(a)-2.))<1.e-3)
 	if len(pos)>0:
 		print('Warning: ', len(pos), ' vertices have alpha within 1e-3 of -2')
@@ -131,10 +131,10 @@
 	if eq=='Weertman1D':
 		theta=1./8
-		a=npy.zeros((md.mesh.numberofvertices,))
+		a=np.zeros((md.mesh.numberofvertices,))
 	elif eq=='Weertman2D':
 		theta=1./9
-		a=npy.ones((md.mesh.numberofvertices,))
+		a=np.ones((md.mesh.numberofvertices,))
 	elif eq=='Thomas':
-		theta=((1.+a+a**2+b**2)**((n-1.)/2.))/(npy.abs(2.+a)**n)
+		theta=((1.+a+a**2+b**2)**((n-1.)/2.))/(np.abs(2.+a)**n)
 	else:
 		raise ValueError('argument passed to "eq" not valid')
Index: /issm/trunk-jpl/src/py3/plot/applyoptions.py
===================================================================
--- /issm/trunk-jpl/src/py3/plot/applyoptions.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/plot/applyoptions.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from cmaptools import truncate_colormap
 from plot_contour import plot_contour
@@ -230,13 +230,13 @@
 	cb.locator=MaxNLocator(nbins=5) # default 5 ticks
 	if options.exist('colorbartickspacing'):
-		locs=npy.arange(lims[0],lims[1]+1,options.getfieldvalue('colorbartickspacing'))
+		locs=np.arange(lims[0],lims[1]+1,options.getfieldvalue('colorbartickspacing'))
 		cb.set_ticks(locs)
 		if options.exist('colorbarlines'):
-			locs=npy.arange(lims[0],lims[1]+1,options.getfieldvalue('colorbarlines'))
-			cb.add_lines(locs,['k' for i in range(len(locs))],npy.ones_like(locs))
+			locs=np.arange(lims[0],lims[1]+1,options.getfieldvalue('colorbarlines'))
+			cb.add_lines(locs,['k' for i in range(len(locs))],np.ones_like(locs))
 			if options.exist('colorbarlineatvalue'):
 				locs=options.getfieldvalue('colorbarlineatvalue')
 				colors=options.getfieldvalue('colorbarlineatvaluecolor',['k' for i in range (len(locs))])
-				widths=options.getfieldvalue('colorbarlineatvaluewidth',npy.ones_like(locs))
+				widths=options.getfieldvalue('colorbarlineatvaluewidth',np.ones_like(locs))
 				cb.add_lines(locs,colors,widths)
 				if options.exist('colorbartitle'):
Index: /issm/trunk-jpl/src/py3/plot/checkplotoptions.py
===================================================================
--- /issm/trunk-jpl/src/py3/plot/checkplotoptions.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/plot/checkplotoptions.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 
 def checkplotoptions(md,options):
@@ -78,5 +78,5 @@
 		sizelist=[12]
 	if len(sizelist)==1:
-		sizelist=npy.tile(sizelist,numtext)
+		sizelist=np.tile(sizelist,numtext)
 
 		# font color
@@ -88,5 +88,5 @@
 			colorlist=['k']
 		if len(colorlist)==1:
-			colorlist=npy.tile(colorlist,numtext)
+			colorlist=np.tile(colorlist,numtext)
 
 		# textweight
@@ -98,5 +98,5 @@
 			weightlist=['normal']
 		if len(weightlist)==1:
-			weightlist=npy.tile(weightlist,numtext)
+			weightlist=np.tile(weightlist,numtext)
 
 		# text rotation
@@ -108,5 +108,5 @@
 			rotationlist=[0]
 		if len(rotationlist)==1:
-			rotationlist=npy.tile(rotationlist,numtext)
+			rotationlist=np.tile(rotationlist,numtext)
 
 		options.changefieldvalue('text',textlist)
@@ -130,5 +130,5 @@
 		if type(expdispvalues)==str:
 			expdispvalues=[expdispvalues]
-		for i in npy.arange(len(expdispvalues)):
+		for i in np.arange(len(expdispvalues)):
 			expdispvaluesarray.append(expdispvalues[i])
 			if len(expstylevalues)>i:
Index: /issm/trunk-jpl/src/py3/plot/colormaps/cmaptools.py
===================================================================
--- /issm/trunk-jpl/src/py3/plot/colormaps/cmaptools.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/plot/colormaps/cmaptools.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 
 try:
@@ -21,5 +21,5 @@
 
 	new_cmap = mpl.colors.LinearSegmentedColormap.from_list('trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name,
-		a=minval, b=maxval), cmap(npy.linspace(minval, maxval, n)))
+		a=minval, b=maxval), cmap(np.linspace(minval, maxval, n)))
 	
 	return new_cmap
Index: /issm/trunk-jpl/src/py3/plot/plot_contour.py
===================================================================
--- /issm/trunk-jpl/src/py3/plot/plot_contour.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/plot/plot_contour.py	(revision 21255)
@@ -23,5 +23,5 @@
 		pass
 	elif datatype==3: # quiver (vector) data
-		data=npy.sqrt(datain**2)
+		data=np.sqrt(datain**2)
 	else:
 		raise ValueError('datatype not supported in call to plot_contour')
Index: /issm/trunk-jpl/src/py3/plot/plot_overlay.py
===================================================================
--- /issm/trunk-jpl/src/py3/plot/plot_overlay.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/plot/plot_overlay.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from processmesh import processmesh
 from processdata import processdata
@@ -28,5 +28,5 @@
 	if data=='none' or data==None:
 		imageonly=1
-		data=npy.float('nan')*npy.ones((md.mesh.numberofvertices,))
+		data=np.float('nan')*np.ones((md.mesh.numberofvertices,))
 		datatype=1
 	else:
@@ -70,5 +70,5 @@
 
 	# normalize array
-	arr=arr/npy.float(npy.max(arr.ravel()))
+	arr=arr/np.float(np.max(arr.ravel()))
         arr=1.-arr # somehow the values got flipped
 
@@ -96,7 +96,7 @@
 	dy=trans[5]	
 	
-	xarr=npy.arange(xmin,xmax,dx)
-	yarr=npy.arange(ymin,ymax,-dy) # -dy since origin='upper' (not sure how robust this is)
-	xg,yg=npy.meshgrid(xarr,yarr)
+	xarr=np.arange(xmin,xmax,dx)
+	yarr=np.arange(ymin,ymax,-dy) # -dy since origin='upper' (not sure how robust this is)
+	xg,yg=np.meshgrid(xarr,yarr)
 	overlaylims=options.getfieldvalue('overlaylims',[min(arr.ravel()),max(arr.ravel())])
 	norm=mpl.colors.Normalize(vmin=overlaylims[0],vmax=overlaylims[1])
@@ -118,13 +118,13 @@
                 #test
                 #m.ax=ax
-	        meridians=npy.arange(-180.,181.,1.)
-	        parallels=npy.arange(-80.,80.,1.)
+	        meridians=np.arange(-180.,181.,1.)
+	        parallels=np.arange(-80.,80.,1.)
 	        m.drawparallels(parallels,labels=[0,0,1,1]) # labels=[left,right,top,bottom]
 	        m.drawmeridians(meridians,labels=[1,1,0,0])
                 m.drawcoastlines()
-	        pc=m.pcolormesh(xg, yg, npy.flipud(arr), cmap=mpl.cm.Greys, norm=norm, ax=ax)
+	        pc=m.pcolormesh(xg, yg, np.flipud(arr), cmap=mpl.cm.Greys, norm=norm, ax=ax)
 
 	else:
-	        pc=ax.pcolormesh(xg, yg, npy.flipud(arr), cmap=mpl.cm.Greys, norm=norm)
+	        pc=ax.pcolormesh(xg, yg, np.flipud(arr), cmap=mpl.cm.Greys, norm=norm)
         
 	#rasterization? 
Index: /issm/trunk-jpl/src/py3/plot/plot_streamlines.py
===================================================================
--- /issm/trunk-jpl/src/py3/plot/plot_streamlines.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/plot/plot_streamlines.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 import matplotlib.pyplot as plt
 import matplotlib.tri as tri
@@ -42,5 +42,5 @@
 
     # format data for matplotlib streamplot function
-    yg,xg=npy.mgrid[min(md.mesh.y):max(md.mesh.y):100j,min(md.mesh.x):max(md.mesh.x):100j]
+    yg,xg=np.mgrid[min(md.mesh.y):max(md.mesh.y):100j,min(md.mesh.x):max(md.mesh.x):100j]
     ug=griddata((x,y),u,(xg,yg),method='linear')
     vg=griddata((x,y),v,(xg,yg),method='linear')
@@ -57,6 +57,6 @@
     if linewidth=='vel':
         scale=options.getfieldvalue('streamlineswidthscale',3)
-        vel=npy.sqrt(ug**2+vg**2)
-        linewidth=scale*vel/npy.amax(vel)
+        vel=np.sqrt(ug**2+vg**2)
+        linewidth=scale*vel/np.amax(vel)
         linewidth[linewidth<0.5]=0.5
 
Index: /issm/trunk-jpl/src/py3/plot/plot_unit.py
===================================================================
--- /issm/trunk-jpl/src/py3/plot/plot_unit.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/plot/plot_unit.py	(revision 21255)
@@ -4,5 +4,5 @@
     import matplotlib as mpl
     import matplotlib.pyplot as plt
-    import numpy as npy
+    import numpy as np
 except ImportError:
     print("could not import pylab, matplotlib has not been installed, no plotting capabilities enabled")
@@ -39,12 +39,12 @@
     #normalize colormap if clim/caxis specified
     if options.exist('clim'):
-        lims=options.getfieldvalue('clim',[npy.amin(data),npy.amax(data)])
+        lims=options.getfieldvalue('clim',[np.amin(data),np.amax(data)])
     elif options.exist('caxis'):
-        lims=options.getfieldvalue('caxis',[npy.amin(data),npy.amax(data)])
+        lims=options.getfieldvalue('caxis',[np.amin(data),np.amax(data)])
     else:
-        if npy.amin(data)==npy.amax(data):
-            lims=[npy.amin(data)-0.5,npy.amax(data)+0.5]
+        if np.amin(data)==np.amax(data):
+            lims=[np.amin(data)-0.5,np.amax(data)+0.5]
         else:
-    	    lims=[npy.amin(data),npy.amax(data)]
+    	    lims=[np.amin(data),np.amax(data)]
     norm = mpl.colors.Normalize(vmin=lims[0], vmax=lims[1])
     if datatype==1:
@@ -70,7 +70,7 @@
         vy=data[:,1]
         #TODO write plot_quiver.py to handle this here
-        color=npy.sqrt(vx**2+vy**2)
+        color=np.sqrt(vx**2+vy**2)
         scale=options.getfieldvalue('scale',1000)
-        width=options.getfieldvalue('width',0.005*(npy.amax(x)-npy.amin(y)))
+        width=options.getfieldvalue('width',0.005*(np.amax(x)-np.amin(y)))
         headwidth=options.getfieldvalue('headwidth',3)
         headlength=options.getfieldvalue('headlength',5)
Index: /issm/trunk-jpl/src/py3/plot/plotmodel.py
===================================================================
--- /issm/trunk-jpl/src/py3/plot/plotmodel.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/plot/plotmodel.py	(revision 21255)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from plotoptions import plotoptions
 
@@ -35,5 +35,5 @@
 		nr=True
 	else:
-		nrows=npy.ceil(numberofplots/subplotwidth)
+		nrows=np.ceil(numberofplots/subplotwidth)
 		nr=False
 	
Index: /issm/trunk-jpl/src/py3/plot/processdata.py
===================================================================
--- /issm/trunk-jpl/src/py3/plot/processdata.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/plot/processdata.py	(revision 21255)
@@ -1,4 +1,4 @@
 from math import isnan
-import numpy as npy
+import numpy as np
 
 def processdata(md,data,options):
@@ -26,8 +26,8 @@
     	numberofelements2d=md.mesh.numberofelements2d
     else:
-    	numberofvertices2d=npy.nan
-    	numberofelements2d=npy.nan
+    	numberofvertices2d=np.nan
+    	numberofelements2d=np.nan
     
-    procdata=npy.copy(data)
+    procdata=np.copy(data)
     
     #process patch
@@ -37,8 +37,8 @@
     
     #get datasize
-    if npy.ndim(procdata)==1:
-    	datasize=npy.array([len(procdata),1])
+    if np.ndim(procdata)==1:
+    	datasize=np.array([len(procdata),1])
     else:
-    	datasize=npy.shape(procdata)
+    	datasize=np.shape(procdata)
         if len(datasize)>2:
             raise ValueError('data passed to plotmodel has more than 2 dimensions; check that column vectors are rank-1')
@@ -46,12 +46,12 @@
     #process NaN's if any
     nanfill=options.getfieldvalue('nan',-9999)
-    if npy.any(npy.isnan(procdata)):
-    	lb=npy.min(data[~npy.isnan(data)])
-    	ub=npy.max(data[~npy.isnan(data)])
+    if np.any(np.isnan(procdata)):
+    	lb=np.min(data[~np.isnan(data)])
+    	ub=np.max(data[~np.isnan(data)])
     	if lb==ub:
     	    lb=lb-0.5
     	    ub=ub+0.5
     	    nanfill=lb-1
-    	procdata[npy.isnan(procdata)]=nanfill
+    	procdata[np.isnan(procdata)]=nanfill
     	options.addfielddefault('clim',[lb,ub])
     	options.addfielddefault('cmap_set_under','1')
@@ -99,5 +99,5 @@
     
     #convert rank-2 array to rank-1
-    if npy.ndim(procdata)==2 and npy.shape(procdata)[1]==1:
+    if np.ndim(procdata)==2 and np.shape(procdata)[1]==1:
     	procdata=procdata.reshape(-1,)
     
Index: /issm/trunk-jpl/src/py3/plot/processmesh.py
===================================================================
--- /issm/trunk-jpl/src/py3/plot/processmesh.py	(revision 21254)
+++ /issm/trunk-jpl/src/py3/plot/processmesh.py	(revision 21255)
@@ -1,5 +1,5 @@
 from math import isnan
 import MatlabFuncs as m
-import numpy as npy
+import numpy as np
 
 def processmesh(md,data,options):
@@ -33,5 +33,5 @@
 			z=md.mesh.z
 		else:
-			z=npy.zeros_like(md.mesh.x)
+			z=np.zeros_like(md.mesh.x)
 		
 		if 'elements2d' in dir(md.mesh): 
