Index: /issm/trunk-jpl/src/m/boundaryconditions/PattynSMB.py
===================================================================
--- /issm/trunk-jpl/src/m/boundaryconditions/PattynSMB.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/boundaryconditions/PattynSMB.py	(revision 21254)
@@ -1,4 +1,4 @@
 import os
-import numpy as npy
+import numpy as np
 def PattynSMB(md,Tf):
 	"""
@@ -25,5 +25,5 @@
 
 	#Double check lat and long exist:
-	if npy.any(npy.isnan(md.mesh.lat)): 
+	if np.any(np.isnan(md.mesh.lat)): 
 		raise IOError('PattynSMB error message: md.mesh.lat field required')
 
@@ -36,5 +36,5 @@
 	# Calculate summer temperature, Eqn (12)
 	# No melting at PIG in mean conditions - need about 6 degress Tf to start having a negative yearly SMB
-	Tms = 16.81 - 0.00692*md.geometry.surface - 0.27937*npy.abs(md.mesh.lat) + Tf
+	Tms = 16.81 - 0.00692*md.geometry.surface - 0.27937*np.abs(md.mesh.lat) + Tf
 	Tms= Tms[0]
 
@@ -43,7 +43,7 @@
 
 	# Calculate Ablation, Eqn (10) (use for both Antarctica & Greenland), max melt is 10m/a
-	ABLdot=0.*npy.ones(md.mesh.numberofvertices)
-	pos=npy.nonzero(Tms>=0)
-	ABLdot[pos]=npy.minimum(1.4*Tms[pos],10)
+	ABLdot=0.*np.ones(md.mesh.numberofvertices)
+	pos=np.nonzero(Tms>=0)
+	ABLdot[pos]=np.minimum(1.4*Tms[pos],10)
 
 	smb=ACCdot-ABLdot
Index: /issm/trunk-jpl/src/m/classes/outputdefinition.py
===================================================================
--- /issm/trunk-jpl/src/m/classes/outputdefinition.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/classes/outputdefinition.py	(revision 21254)
@@ -2,5 +2,5 @@
 from checkfield import checkfield
 from WriteData import WriteData
-import numpy as npy
+import numpy as np
 
 class outputdefinition(object):
@@ -40,5 +40,5 @@
 			data.append(classdefinition)
 
-		data=npy.unique(data);
+		data=np.unique(data);
 		WriteData(fid,prefix,'data',data,'name','md.outputdefinition.list','format','StringArray');
 	# }}}
Index: /issm/trunk-jpl/src/m/classes/qmu.py
===================================================================
--- /issm/trunk-jpl/src/m/classes/qmu.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/classes/qmu.py	(revision 21254)
@@ -5,5 +5,4 @@
 from checkfield import checkfield
 from WriteData import WriteData
-import MatlabFuncs as m
 
 class qmu(object):
@@ -41,7 +40,6 @@
 
 		s+="%s\n" % fielddisplay(self,'isdakota','is qmu analysis activated?')
-		for i,variable in enumerate(self.variables.iteritems()):
-			s+="         variables%s:  (arrays of each variable class)\n" % \
-					string_dim(self.variables,i)
+		for variable in self.variables:
+			s+="         variables%s:  (arrays of each variable class)\n" % len(variable)
 			fnames=vars(variable)
 			maxlen=0
@@ -53,7 +51,6 @@
 						(maxlen+1,fname,size(getattr(variable,fname)),type(getattr(variable,fname)))
 
-		for i,response in enumerate(self.responses.iteritems()):
-			s+="         responses%s:  (arrays of each response class)\n" % \
-					string_dim(self.responses,i)
+		for response in self.responses:
+			s+="         responses%s:  (arrays of each response class)\n" % len(response)
 			fnames=vars(response)
 			maxlen=0
@@ -67,12 +64,11 @@
 		s+="%s\n" % fielddisplay(self,'numberofresponses','number of responses') 
 
-		for i,method in enumerate(self.method.iteritems()):
+		for method in self.method:
 			if isinstance(method,'dakota_method'):
 				s+="            method%s :    '%s'\n" % \
-						(string_dim(method,i),method.method)
+						(len(method),method.method)
 
-		for i,param in enumerate(self.params.iteritems()):
-			s+="         params%s:  (array of method-independent parameters)\n" % \
-					string_dim(self.params,i)
+		for param in self.params:
+			s+="         params%s:  (array of method-independent parameters)\n" % len(param)
 			fnames=vars(param)
 			maxlen=0
@@ -84,7 +80,6 @@
 						(maxlen+1,fname,any2str(getattr(param,fname)))
 
-		for i,result in enumerate(self.results.iteritems()):
-			s+="         results%s:  (information from dakota files)\n" % \
-					string_dim(self.results,i)
+		for result in self.results:
+			s+="         results%s:  (information from dakota files)\n" % len(result)
 			fnames=vars(result)
 			maxlen=0
@@ -132,5 +127,5 @@
 				md.checkmessage("for qmu analysis, partitioning vector cannot go over npart, number of partition areas")
 
-		if not m.strcmpi(md.cluster.name,'none'):
+		if md.cluster.name!='none':
 			if not md.settings.waitonlock:
 				md.checkmessage("waitonlock should be activated when running qmu in parallel mode!")
Index: /issm/trunk-jpl/src/m/classes/settings.py
===================================================================
--- /issm/trunk-jpl/src/m/classes/settings.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/classes/settings.py	(revision 21254)
@@ -16,5 +16,5 @@
 		self.lowmem              = 0
 		self.output_frequency    = 0
-		self.recording_frequency    = 0
+		self.recording_frequency = 0
 		self.waitonlock          = 0
 
Index: /issm/trunk-jpl/src/m/coordsystems/ll2xy.py
===================================================================
--- /issm/trunk-jpl/src/m/coordsystems/ll2xy.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/coordsystems/ll2xy.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy 
+import numpy as np 
 
 def ll2xy(lat,lon,sgn=-1,central_meridian=0,standard_parallel=71):
@@ -36,24 +36,24 @@
 	ex2 = .006693883
 	# Eccentricity of the Hughes ellipsoid
-	ex = npy.sqrt(ex2)
+	ex = np.sqrt(ex2)
 	
-	latitude = npy.abs(lat) * npy.pi/180.
-	longitude = (lon + delta) * npy.pi/180.
+	latitude = np.abs(lat) * np.pi/180.
+	longitude = (lon + delta) * np.pi/180.
 	
 	# compute X and Y in grid coordinates.
-	T = npy.tan(npy.pi/4-latitude/2) / ((1-ex*npy.sin(latitude))/(1+ex*npy.sin(latitude)))**(ex/2)
+	T = np.tan(np.pi/4-latitude/2) / ((1-ex*np.sin(latitude))/(1+ex*np.sin(latitude)))**(ex/2)
 	
 	if (90 - slat) <  1.e-5:
-		rho = 2.*re*T/npy.sqrt((1.+ex)**(1.+ex)*(1.-ex)**(1.-ex))
+		rho = 2.*re*T/np.sqrt((1.+ex)**(1.+ex)*(1.-ex)**(1.-ex))
 	else:
-		sl  = slat*npy.pi/180.
-		tc  = npy.tan(npy.pi/4.-sl/2.)/((1.-ex*npy.sin(sl))/(1.+ex*npy.sin(sl)))**(ex/2.)
-		mc  = npy.cos(sl)/npy.sqrt(1.0-ex2*(npy.sin(sl)**2))
+		sl  = slat*np.pi/180.
+		tc  = np.tan(np.pi/4.-sl/2.)/((1.-ex*np.sin(sl))/(1.+ex*np.sin(sl)))**(ex/2.)
+		mc  = np.cos(sl)/np.sqrt(1.0-ex2*(np.sin(sl)**2))
 		rho = re*mc*T/tc
 	
-	y = -rho * sgn * npy.cos(sgn*longitude)
-	x =  rho * sgn * npy.sin(sgn*longitude)
+	y = -rho * sgn * np.cos(sgn*longitude)
+	x =  rho * sgn * np.sin(sgn*longitude)
 
-	cnt1=npy.nonzero(latitude>= npy.pi/2.)[0]
+	cnt1=np.nonzero(latitude>= np.pi/2.)[0]
 	
 	if cnt1:
Index: /issm/trunk-jpl/src/m/coordsystems/xy2ll.py
===================================================================
--- /issm/trunk-jpl/src/m/coordsystems/xy2ll.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/coordsystems/xy2ll.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from math import pi
 
@@ -39,7 +39,7 @@
 	# if x,y passed as lists, convert to numpy arrays
 	if type(x) != "numpy.ndarray":
-		x=npy.array(x)
+		x=np.array(x)
 	if type(y) != "numpy.ndarray":
-		y=npy.array(y)
+		y=np.array(y)
 
 	## Conversion constant from degrees to radians
@@ -50,26 +50,26 @@
 	ex2 = .006693883
 	## Eccentricity of the Hughes ellipsoid
-	ex = npy.sqrt(ex2)
+	ex = np.sqrt(ex2)
 	
 	sl = slat*pi/180.
-	rho = npy.sqrt(x**2 + y**2)
-	cm = npy.cos(sl) / npy.sqrt(1.0 - ex2 * (npy.sin(sl)**2))
-	T = npy.tan((pi/4.0) - (sl/2.0)) / ((1.0 - ex*npy.sin(sl)) / (1.0 + ex*npy.sin(sl)))**(ex / 2.0)
+	rho = np.sqrt(x**2 + y**2)
+	cm = np.cos(sl) / np.sqrt(1.0 - ex2 * (np.sin(sl)**2))
+	T = np.tan((pi/4.0) - (sl/2.0)) / ((1.0 - ex*np.sin(sl)) / (1.0 + ex*np.sin(sl)))**(ex / 2.0)
 	
 	if abs(slat-90.) < 1.e-5:
-		T = rho*npy.sqrt((1. + ex)**(1. + ex) * (1. - ex)**(1. - ex)) / 2. / re
+		T = rho*np.sqrt((1. + ex)**(1. + ex) * (1. - ex)**(1. - ex)) / 2. / re
 	else:
 		T = rho * T / (re * cm)
 	
-	chi = (pi / 2.0) - 2.0 * npy.arctan(T)
+	chi = (pi / 2.0) - 2.0 * np.arctan(T)
 	lat = chi + ((ex2 / 2.0) + (5.0 * ex2**2.0 / 24.0) + (ex2**3.0 / 12.0)) * \
-		npy.sin(2 * chi) + ((7.0 * ex2**2.0 / 48.0) + (29.0 * ex2**3 / 240.0)) * \
-		npy.sin(4.0 * chi) + (7.0 * ex2**3.0 / 120.0) * npy.sin(6.0 * chi) 
+		np.sin(2 * chi) + ((7.0 * ex2**2.0 / 48.0) + (29.0 * ex2**3 / 240.0)) * \
+		np.sin(4.0 * chi) + (7.0 * ex2**3.0 / 120.0) * np.sin(6.0 * chi) 
 	
 	lat = sgn * lat
-	lon = npy.arctan2(sgn * x,-sgn * y)
+	lon = np.arctan2(sgn * x,-sgn * y)
 	lon = sgn * lon
 	
-	res1 = npy.nonzero(rho <= 0.1)[0]
+	res1 = np.nonzero(rho <= 0.1)[0]
 	if len(res1) > 0:
 		lat[res1] = 90. * sgn
Index: /issm/trunk-jpl/src/m/exp/expcoarsen.py
===================================================================
--- /issm/trunk-jpl/src/m/exp/expcoarsen.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/exp/expcoarsen.py	(revision 21254)
@@ -1,4 +1,4 @@
 import os.path
-import numpy as npy
+import numpy as np
 from collections import OrderedDict
 from expread import expread
@@ -34,5 +34,5 @@
 	for contour in  contours:
 		
-		numpoints=npy.size(contour['x'])
+		numpoints=np.size(contour['x'])
 
 		j=0
@@ -44,17 +44,17 @@
 
 			#see whether we keep this point or not
-			distance=npy.sqrt((x[j]-x[j+1])**2+(y[j]-y[j+1])**2)
+			distance=np.sqrt((x[j]-x[j+1])**2+(y[j]-y[j+1])**2)
 			if distance<resolution and j<numpoints-2:   #do not remove last point
-				x=npy.delete(x,j+1,0)
-				y=npy.delete(y,j+1,0)
+				x=np.delete(x,j+1,0)
+				y=np.delete(y,j+1,0)
 				numpoints=numpoints-1
 			else:
-				division=int(npy.floor(distance/resolution)+1)
+				division=int(np.floor(distance/resolution)+1)
 				if division>=2:
-					xi=npy.linspace(x[j],x[j+1],division)
-					yi=npy.linspace(y[j],y[j+1],division)
+					xi=np.linspace(x[j],x[j+1],division)
+					yi=np.linspace(y[j],y[j+1],division)
 					
-					x=npy.hstack((x[0:j+1],xi[1:-1],x[j+1:]))
-					y=npy.hstack((y[0:j+1],yi[1:-1],y[j+1:]))
+					x=np.hstack((x[0:j+1],xi[1:-1],x[j+1:]))
+					y=np.hstack((y[0:j+1],yi[1:-1],y[j+1:]))
 
 					#update current point
@@ -65,8 +65,8 @@
 					j=j+1
 		
-		if npy.size(x)>1:
+		if np.size(x)>1:
 			#keep the (x,y) contour arond
 			newcontour=OrderedDict()
-			newcontour['nods']=npy.size(x)
+			newcontour['nods']=np.size(x)
 			newcontour['density']=contour['density']
 			newcontour['x']=x
Index: /issm/trunk-jpl/src/m/exp/expdisp.py
===================================================================
--- /issm/trunk-jpl/src/m/exp/expdisp.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/exp/expdisp.py	(revision 21254)
@@ -1,4 +1,4 @@
 from expread import expread
-import numpy as npy
+import numpy as np
 
 def expdisp(domainoutline,ax,linestyle='--k',linewidth=1,unitmultiplier=1.):
Index: /issm/trunk-jpl/src/m/extrusion/DepthAverage.py
===================================================================
--- /issm/trunk-jpl/src/m/extrusion/DepthAverage.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/extrusion/DepthAverage.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from project2d import project2d
 
@@ -19,7 +19,7 @@
 
 	# coerce to array in case float is passed
-	if type(vector)!=npy.ndarray:
+	if type(vector)!=np.ndarray:
 		print 'coercing array'
-		vector=npy.array(value)
+		vector=np.array(vector)
 
 	vec2d=False
@@ -30,5 +30,5 @@
 	#nods data
 	if vector.shape[0]==md.mesh.numberofvertices:
-		vector_average=npy.zeros(md.mesh.numberofvertices2d)
+		vector_average=np.zeros(md.mesh.numberofvertices2d)
 		for i in xrange(1,md.mesh.numberoflayers):
 			vector_average=vector_average+(project2d(md,vector,i)+project2d(md,vector,i+1))/2.*(project2d(md,md.mesh.z,i+1)-project2d(md,md.mesh.z,i))
@@ -37,5 +37,5 @@
 	#element data
 	elif vector.shape[0]==md.mesh.numberofelements:
-		vector_average=npy.zeros(md.mesh.numberofelements2d)
+		vector_average=np.zeros(md.mesh.numberofelements2d)
 		for i in xrange(1,md.mesh.numberoflayers):
 			vector_average=vector_average+project2d(md,vector,i)*(project2d(md,md.mesh.z,i+1)-project2d(md,md.mesh.z,i))
Index: /issm/trunk-jpl/src/m/extrusion/project2d.py
===================================================================
--- /issm/trunk-jpl/src/m/extrusion/project2d.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/extrusion/project2d.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 
 def project2d(md3d,value,layer):
@@ -25,7 +25,7 @@
 	
 	# coerce to array in case float is passed
-	if type(value)!=npy.ndarray:
+	if type(value)!=np.ndarray:
 		print 'coercing array'
-		value=npy.array(value)
+		value=np.array(value)
 
 	vec2d=False
Index: /issm/trunk-jpl/src/m/geometry/slope.py
===================================================================
--- /issm/trunk-jpl/src/m/geometry/slope.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/geometry/slope.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from GetNodalFunctionsCoeff import  GetNodalFunctionsCoeff
 
@@ -33,14 +33,14 @@
 	alpha,beta=GetNodalFunctionsCoeff(index,x,y)[0:2]
 
-	summation=npy.array([[1],[1],[1]])
-	sx=npy.dot(surf[index-1]*alpha,summation).reshape(-1,)
-	sy=npy.dot(surf[index-1]*beta,summation).reshape(-1,)
+	summation=np.array([[1],[1],[1]])
+	sx=np.dot(surf[index-1]*alpha,summation).reshape(-1,)
+	sy=np.dot(surf[index-1]*beta,summation).reshape(-1,)
 
-	s=npy.sqrt(sx**2+sy**2)
+	s=np.sqrt(sx**2+sy**2)
 
 	if md.mesh.dimension()==3:
 		sx=project3d(md,'vector',sx,'type','element')
 		sy=project3d(md,'vector',sy,'type','element')
-		s=npy.sqrt(sx**2+sy**2)
+		s=np.sqrt(sx**2+sy**2)
 
 	return (sx,sy,s)
Index: /issm/trunk-jpl/src/m/interp/SectionValues.py
===================================================================
--- /issm/trunk-jpl/src/m/interp/SectionValues.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/interp/SectionValues.py	(revision 21254)
@@ -1,5 +1,5 @@
 import os
 from expread import expread
-import numpy as npy
+import numpy as np
 from project2d import project2d
 #from InterpFromMesh2d import InterpFromMesh2d
@@ -41,7 +41,7 @@
 
 	#initialization
-	X=npy.array([]) #X-coordinate
-	Y=npy.array([]) #Y-coordinate
-	S=npy.array([0.])  #curvilinear coordinate
+	X=np.array([]) #X-coordinate
+	Y=np.array([]) #Y-coordinate
+	S=np.array([0.])  #curvilinear coordinate
 	
 	for i in xrange(nods-1):
@@ -53,10 +53,10 @@
 		s_start=S[-1]
 	
-		length_segment=npy.sqrt((x_end-x_start)**2+(y_end-y_start)**2)
-		portion=npy.ceil(length_segment/res_h)
+		length_segment=np.sqrt((x_end-x_start)**2+(y_end-y_start)**2)
+		portion=np.ceil(length_segment/res_h)
 	
-		x_segment=npy.zeros(portion)
-		y_segment=npy.zeros(portion)
-		s_segment=npy.zeros(portion)
+		x_segment=np.zeros(portion)
+		y_segment=np.zeros(portion)
+		s_segment=np.zeros(portion)
 
 		for j in xrange(int(portion)):
@@ -66,10 +66,10 @@
 	
 		#plug into X and Y
-		X=npy.append(X,x_segment)
-		Y=npy.append(Y,y_segment)
-		S=npy.append(S,s_segment)
+		X=np.append(X,x_segment)
+		Y=np.append(Y,y_segment)
+		S=np.append(S,s_segment)
 
-	X=npy.append(X,x[nods-1])
-	Y=npy.append(Y,y[nods-1])
+	X=np.append(X,x[nods-1])
+	Y=np.append(Y,y[nods-1])
 	
 	#Number of nodes:
@@ -77,5 +77,5 @@
 	
 	#Compute Z
-	Z=npy.zeros(numberofnodes)
+	Z=np.zeros(numberofnodes)
 	
 	#New mesh and Data interpolation
@@ -88,5 +88,5 @@
 	
 		#Compute index
-		index=npy.array([range(1,numberofnodes),range(2,numberofnodes+1)]).T
+		index=np.array([range(1,numberofnodes),range(2,numberofnodes+1)]).T
 	
 	else:
@@ -102,5 +102,5 @@
 	
 		#Some useful parameters
-		layers=int(npy.ceil(npy.mean(md.geometry.thickness)/res_v))
+		layers=int(np.ceil(np.mean(md.geometry.thickness)/res_v))
 		nodesperlayer=int(numberofnodes)
 		nodestot=int(nodesperlayer*layers)
@@ -109,9 +109,9 @@
 	
 		#initialization
-		X3=npy.zeros(nodesperlayer*layers) 
-		Y3=npy.zeros(nodesperlayer*layers) 
-		Z3=npy.zeros(nodesperlayer*layers) 
-		S3=npy.zeros(nodesperlayer*layers) 
-		index3=npy.zeros((elementstot,4))
+		X3=np.zeros(nodesperlayer*layers) 
+		Y3=np.zeros(nodesperlayer*layers) 
+		Z3=np.zeros(nodesperlayer*layers) 
+		S3=np.zeros(nodesperlayer*layers) 
+		index3=np.zeros((elementstot,4))
 	
 		#Get new coordinates in 3d
@@ -123,9 +123,9 @@
 	
 			if i<layers-1:  #Build index3 with quads
-				ids=npy.vstack((npy.arange(i,nodestot-layers,layers),npy.arange(i+1,nodestot-layers,layers),npy.arange(i+layers+1,nodestot,layers),npy.arange(i+layers,nodestot,layers))).T
+				ids=np.vstack((np.arange(i,nodestot-layers,layers),np.arange(i+1,nodestot-layers,layers),np.arange(i+layers+1,nodestot,layers),np.arange(i+layers,nodestot,layers))).T
 				index3[(i-1)*elementsperlayer:i*elementsperlayer,:]=ids
 
 		#Interpolation of data on specified points
-		data_interp=InterpFromMeshToMesh3d(md.mesh.elements,md.mesh.x,md.mesh.y,md.mesh.z,data,X3,Y3,Z3,npy.nan)[0]
+		data_interp=InterpFromMeshToMesh3d(md.mesh.elements,md.mesh.x,md.mesh.y,md.mesh.z,data,X3,Y3,Z3,np.nan)[0]
 	
 		#build outputs
Index: /issm/trunk-jpl/src/m/interp/averaging.py
===================================================================
--- /issm/trunk-jpl/src/m/interp/averaging.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/interp/averaging.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from GetAreas import GetAreas
 import MatlabFuncs as m
@@ -39,8 +39,8 @@
 	#initialization
 	if layer==0:
-		weights=npy.zeros(md.mesh.numberofvertices,)
+		weights=np.zeros(md.mesh.numberofvertices,)
 		data=data.flatten(1)
 	else:
-		weights=npy.zeros(md.mesh.numberofvertices2d,)
+		weights=np.zeros(md.mesh.numberofvertices2d,)
 		data=data[(layer-1)*md.mesh.numberofvertices2d+1:layer*md.mesh.numberofvertices2d,:]
 	
@@ -69,14 +69,14 @@
 	index=index-1 # since python indexes starting from zero
 	line=index.flatten(1)
-	areas=npy.vstack(areas).reshape(-1,)
-	summation=1./rep*npy.ones(rep,)
+	areas=np.vstack(areas).reshape(-1,)
+	summation=1./rep*np.ones(rep,)
 	linesize=rep*numberofelements
 	
 	#update weights that holds the volume of all the element holding the node i
-	weights=csc_matrix( (npy.tile(areas,(rep,1)).reshape(-1,),(line,npy.zeros(linesize,))), shape=(numberofnodes,1))
+	weights=csc_matrix( (np.tile(areas,(rep,1)).reshape(-1,),(line,np.zeros(linesize,))), shape=(numberofnodes,1))
 	
 	#initialization
 	if len(data)==numberofelements:
-		average_node=csc_matrix( (npy.tile(areas*data,(rep,1)).reshape(-1,),(line,npy.zeros(linesize,))), shape=(numberofnodes,1))
+		average_node=csc_matrix( (np.tile(areas*data,(rep,1)).reshape(-1,),(line,np.zeros(linesize,))), shape=(numberofnodes,1))
 		average_node=average_node/weights
 		average_node = csc_matrix(average_node)
@@ -85,12 +85,12 @@
 
 	#loop over iteration
-	for i in npy.arange(1,iterations+1):
-		average_el=npy.asarray(npy.dot(average_node.todense()[index].reshape(numberofelements,rep),npy.vstack(summation))).reshape(-1,)
-		average_node=csc_matrix( (npy.tile(areas*average_el.reshape(-1),(rep,1)).reshape(-1,),(line,npy.zeros(linesize,))), shape=(numberofnodes,1))
+	for i in np.arange(1,iterations+1):
+		average_el=np.asarray(np.dot(average_node.todense()[index].reshape(numberofelements,rep),np.vstack(summation))).reshape(-1,)
+		average_node=csc_matrix( (np.tile(areas*average_el.reshape(-1),(rep,1)).reshape(-1,),(line,np.zeros(linesize,))), shape=(numberofnodes,1))
 		average_node=average_node/weights
 		average_node=csc_matrix(average_node)
 	
 	#return output as a full matrix (C code do not like sparse matrices)
-	average=npy.asarray(average_node.todense()).reshape(-1,)
+	average=np.asarray(average_node.todense()).reshape(-1,)
 
 	return average
Index: /issm/trunk-jpl/src/m/interp/holefiller.py
===================================================================
--- /issm/trunk-jpl/src/m/interp/holefiller.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/interp/holefiller.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from scipy.spatial import cKDTree
 
@@ -30,6 +30,6 @@
 	filled=data
 	
-	XYGood=npy.dstack([x[goodids],y[goodids]])[0]
-	XYBad=npy.dstack([x[badids],y[badids]])[0]
+	XYGood=np.dstack([x[goodids],y[goodids]])[0]
+	XYBad=np.dstack([x[badids],y[badids]])[0]
 	tree=cKDTree(XYGood)
 	nearest=tree.query(XYBad,k=knn)[1]
@@ -42,5 +42,5 @@
 			for j in range(knn):
 				neardat.append(filled[goodids][nearest[i][j]])
-				filled[badids[i]]=npy.mean(neardat)
+				filled[badids[i]]=np.mean(neardat)
 				
 	return filled
Index: /issm/trunk-jpl/src/m/interp/interp.py
===================================================================
--- /issm/trunk-jpl/src/m/interp/interp.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/interp/interp.py	(revision 21254)
@@ -1,4 +1,4 @@
 # module for inperpolating/smoothing data
-import numpy as npy
+import numpy as np
 from scipy.interpolate import CloughTocher2DInterpolator, Rbf
 from scipy.spatial import cKDTree
@@ -51,19 +51,19 @@
 	xlim=[min(xi)-dx,max(xi)+dx]
 	ylim=[min(yi)-dy,max(yi)+dy]
-	xflag=npy.logical_and(x>xlim[0],x<xlim[1])
-	yflag=npy.logical_and(y>ylim[0],y<ylim[1])
-	bothind=npy.nonzero(npy.logical_and(xflag,yflag))
+	xflag=np.logical_and(x>xlim[0],x<xlim[1])
+	yflag=np.logical_and(y>ylim[0],y<ylim[1])
+	bothind=np.nonzero(np.logical_and(xflag,yflag))
 	subdata=data[bothind]
 	subx=x[bothind]
 	suby=y[bothind]
-	points=npy.array([subx,suby]).T
+	points=np.array([subx,suby]).T
 
 	# mask out any nan's in the data and corresponding coordinate points
-	mask=npy.isnan(subdata)
-	ind=npy.nonzero(mask)[0]
+	mask=np.isnan(subdata)
+	ind=np.nonzero(mask)[0]
 	if len(ind) and fill_nans:
 		print "		WARNING: filling nans using spline fit through good data points, which may or may not be appropriate. Check results carefully."
-	subdata=npy.delete(subdata,ind)
-	points=npy.delete(points,ind,axis=0)
+	subdata=np.delete(subdata,ind)
+	points=np.delete(points,ind,axis=0)
 
 	if maxiter:
@@ -76,15 +76,15 @@
 	if not fill_nans:
 		# identify nan's in xi,yi using nearest neighbors
-		xyinterp=npy.dstack([xi,yi])[0]
-		xg,yg=npy.meshgrid(subx,suby)
-		xydata=npy.dstack([subx,suby])[0]
+		xyinterp=np.dstack([xi,yi])[0]
+		xg,yg=np.meshgrid(subx,suby)
+		xydata=np.dstack([subx,suby])[0]
 		tree=cKDTree(xydata)
 		nearest=tree.query(xyinterp)[1]
-		pos=npy.nonzero(npy.isnan(subdata[nearest]))
+		pos=np.nonzero(np.isnan(subdata[nearest]))
 		interpdata[pos]=subdata[nearest][pos]
 
 	return interpdata
 #}}}
-def GridSplineToMesh2d(x,y,data,xi,yi,default_value=npy.nan,plotonly=False,fill_nans=False):#{{{
+def GridSplineToMesh2d(x,y,data,xi,yi,default_value=np.nan,plotonly=False,fill_nans=False):#{{{
 	'''
 	python analog to InterpFromGridToMesh.  This routine uses
@@ -108,5 +108,5 @@
 
 	Usage:
-		interpdata=GridToMesh(x,y,data,xi,yi,default_value=npy.nan,plotonly=False,fill_nans=False)
+		interpdata=GridToMesh(x,y,data,xi,yi,default_value=np.nan,plotonly=False,fill_nans=False)
 
 	Examples:
@@ -114,7 +114,7 @@
 	'''
 
-	if npy.ndim(x)==2:
+	if np.ndim(x)==2:
 		x=x.reshape(-1,)
-	if npy.ndim(y)==2:
+	if np.ndim(y)==2:
 		y=y.reshape(-1,)
 	if len(x) != data.shape[1]+1 and len(x) != data.shape[1]:
@@ -134,34 +134,34 @@
 	if len(x)==data.shape[1] and len(y)==data.shape[0]:
 		print '		x,y taken to define the center of data grid cells'
-		xind=npy.nonzero(npy.logical_and(x>xlim[0],x<xlim[1]))[0]
-		yind=npy.nonzero(npy.logical_and(y>ylim[0],y<ylim[1]))[0]
-		xg,yg=npy.meshgrid(x[xind],y[yind])
+		xind=np.nonzero(np.logical_and(x>xlim[0],x<xlim[1]))[0]
+		yind=np.nonzero(np.logical_and(y>ylim[0],y<ylim[1]))[0]
+		xg,yg=np.meshgrid(x[xind],y[yind])
 		subdata=data[yind[0]:yind[-1]+1,xind[0]:xind[-1]+1]
 	elif len(x)==data.shape[1]+1 and len(y)==data.shape[0]+1:
 		print '		x,y taken to define the corners of data grid cells'
-		xcenter=npy.fromiter(((x[i]+x[i+1])/2 for i in range(len(x)-1)),npy.float)
-		ycenter=npy.fromiter(((y[i]+y[i+1])/2 for i in range(len(y)-1)),npy.float)
-		xind=npy.nonzero(npy.logical_and(xcenter>xlim[0],xcenter<xlim[1]))[0]
-		yind=npy.nonzero(npy.logical_and(ycenter>ylim[0],ycenter<ylim[1]))[0]
-		xg,yg=npy.meshgrid(xcenter[xind],ycenter[yind])
+		xcenter=np.fromiter(((x[i]+x[i+1])/2 for i in range(len(x)-1)),np.float)
+		ycenter=np.fromiter(((y[i]+y[i+1])/2 for i in range(len(y)-1)),np.float)
+		xind=np.nonzero(np.logical_and(xcenter>xlim[0],xcenter<xlim[1]))[0]
+		yind=np.nonzero(np.logical_and(ycenter>ylim[0],ycenter<ylim[1]))[0]
+		xg,yg=np.meshgrid(xcenter[xind],ycenter[yind])
 		subdata=data[yind[0]:yind[-1]+1,xind[0]:xind[-1]+1]
 	else:
 		raise ValueError('x and y have inconsistent sizes: both should have length ncols(data)/nrows(data) or ncols(data)+1/nrows(data)+1')
 
-	points=npy.array([xg.ravel(),yg.ravel()]).T
+	points=np.array([xg.ravel(),yg.ravel()]).T
 	flatsubdata=subdata.ravel()
 
 	if plotonly:
-		plt.imshow(npy.flipud(subdata),origin='upper')
+		plt.imshow(np.flipud(subdata),origin='upper')
 		plt.show()
 		return
 
 	# mask out any nan's in the data and corresponding coordinate points
-	mask=npy.isnan(flatsubdata)
-	ind=npy.nonzero(mask)[0]
+	mask=np.isnan(flatsubdata)
+	ind=np.nonzero(mask)[0]
 	if len(ind) and fill_nans:
 		print "		WARNING: filling nans using spline fit through good data points, which may or may not be appropriate. Check results carefully."
-	goodsubdata=npy.delete(flatsubdata,ind)
-	goodpoints=npy.delete(points,ind,axis=0)
+	goodsubdata=np.delete(flatsubdata,ind)
+	goodpoints=np.delete(points,ind,axis=0)
 
 	# create spline and index spline at mesh points
@@ -171,9 +171,9 @@
 	if not fill_nans:
 		# identify nan's in xi,yi using nearest neighbors
-		xyinterp=npy.dstack([xi,yi])[0]
-		xydata=npy.dstack([xg.ravel(),yg.ravel()])[0]
+		xyinterp=np.dstack([xi,yi])[0]
+		xydata=np.dstack([xg.ravel(),yg.ravel()])[0]
 		tree=cKDTree(xydata)
 		nearest=tree.query(xyinterp)[1]
-		pos=npy.nonzero(npy.isnan(flatsubdata[nearest]))
+		pos=np.nonzero(np.isnan(flatsubdata[nearest]))
 		interpdata[pos]=flatsubdata[nearest][pos]
 
Index: /issm/trunk-jpl/src/m/inversions/parametercontroldrag.py
===================================================================
--- /issm/trunk-jpl/src/m/inversions/parametercontroldrag.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/inversions/parametercontroldrag.py	(revision 21254)
@@ -26,6 +26,6 @@
 
 	#weights
-	weights=options.getfieldvalue('weights',npy.ones(md.mesh.numberofvertices))
-	if npy.size(weights)!=md.mesh.numberofvertices:
+	weights=options.getfieldvalue('weights',np.ones(md.mesh.numberofvertices))
+	if np.size(weights)!=md.mesh.numberofvertices:
 		md.inversion.cost_functions_coefficients=ones(md.mesh.numberofvertices)
 	else:
@@ -34,5 +34,5 @@
 	#nsteps
 	nsteps=options.getfieldvalue('nsteps',100);
-	if (npy.size(nsteps)!=1) | (nsteps<=0) | (floor(nsteps)!=nsteps):
+	if (np.size(nsteps)!=1) | (nsteps<=0) | (floor(nsteps)!=nsteps):
 		md.inversion.nsteps=100
 	else:
@@ -41,7 +41,7 @@
 	#cm_min
 	cm_min=options.getfieldvalue('cm_min',ones(md.mesh.numberofvertices))
-	if (npy.size(cm_min)==1):
+	if (np.size(cm_min)==1):
 		md.inversion.min_parameters=cm_min*ones(md.mesh.numberofvertices)
-	elif (npy.size(cm_min)==md.mesh.numberofvertices):
+	elif (np.size(cm_min)==md.mesh.numberofvertices):
 		md.inversion.min_parameters=cm_min
 	else:
@@ -50,7 +50,7 @@
 	#cm_max
 	cm_max=options.getfieldvalue('cm_max',250*ones(md.mesh.numberofvertices))
-	if (npy.size(cm_max)==1):
+	if (np.size(cm_max)==1):
 		md.inversion.max_parameters=cm_max*ones(md.mesh.numberofvertices)
-	elif (npy.size(cm_max)==md.mesh.numberofvertices):
+	elif (np.size(cm_max)==md.mesh.numberofvertices):
 		md.inversion.max_parameters=cm_max
 	else:
@@ -59,5 +59,5 @@
 	#eps_cm
 	eps_cm=optoins.getfieldvalue('eps_cm',float('nan'))
-	if (npy.size(eps_cm)~=1 | eps_cm<0 ):
+	if (np.size(eps_cm)~=1 | eps_cm<0 ):
 		md.inversion.cost_function_threshold=float('nan')
 	else:
@@ -66,5 +66,5 @@
 	#maxiter
 	maxiter=options.getfieldvalue('maxiter',10*ones(md.inversion.nsteps))
-	if (npy.any(maxiter<0) | npy.any(floor(maxiter)~=maxiter)):
+	if (np.any(maxiter<0) | np.any(floor(maxiter)~=maxiter)):
 		md.inversion.maxiter_per_step=10*ones(md.inversion.nsteps)
 	else:
@@ -75,5 +75,5 @@
 	#cm_jump
 	cm_jump=options.getfieldvalue('cm_jump',0.8*ones(md.inversion.nsteps))
-	if !npy.isreal(cm_jump):
+	if !np.isreal(cm_jump):
 		md.inversion.step_threshold=0.8*ones(md.inversion.nsteps)
 	else:
Index: /issm/trunk-jpl/src/m/materials/TMeltingPoint.py
===================================================================
--- /issm/trunk-jpl/src/m/materials/TMeltingPoint.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/materials/TMeltingPoint.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 
 def TMeltingPoint(reftemp,pressure):
@@ -17,5 +17,5 @@
 
 	#ensure ref is same dimension as pressure
-	ref=reftemp*npy.ones_like(pressure)
+	ref=reftemp*np.ones_like(pressure)
 
 	return reftemp-beta*pressure
Index: /issm/trunk-jpl/src/m/mech/analyticaldamage.py
===================================================================
--- /issm/trunk-jpl/src/m/mech/analyticaldamage.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/mech/analyticaldamage.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from averaging import averaging
 #from plotmodel import plotmodel
@@ -54,5 +54,5 @@
 
 	if isinstance(sigmab,(int,float)):
-		sigmab=sigmab*npy.ones((md.mesh.numberofvertices,))
+		sigmab=sigmab*np.ones((md.mesh.numberofvertices,))
 
 	# check inputs
@@ -61,5 +61,5 @@
 	if not '2d' in md.mesh.__doc__:
 		raise StandardError('only 2d (planview) model supported currently')
-	if npy.any(md.flowequation.element_equation!=2):
+	if np.any(md.flowequation.element_equation!=2):
 		print 'Warning: the model has some non SSA elements. These will be treated like SSA elements'
 
@@ -76,28 +76,28 @@
 	n=averaging(md,md.materials.rheology_n,0)
 	
-	D=1.-(1.+a+a**2+b**2)**((n-1.)/(2.*n))/npy.abs(ex)**(1./n)*(T-sigmab)/B/(2.+a)/npy.sign(ex)
+	D=1.-(1.+a+a**2+b**2)**((n-1.)/(2.*n))/np.abs(ex)**(1./n)*(T-sigmab)/B/(2.+a)/np.sign(ex)
 	
 	# D>1 where (2+a).*sign(ex)<0, compressive regions where high backstress needed
-	pos=npy.nonzero(D>1)
+	pos=np.nonzero(D>1)
 	D[pos]=0
 	
-	backstress=npy.zeros((md.mesh.numberofvertices,))
+	backstress=np.zeros((md.mesh.numberofvertices,))
 
 	# backstress to bring D down to one 
-	backstress[pos]=T[pos]-(1.-D[pos])*B[pos]*npy.sign(ex[pos])*(2.+a[pos])*npy.abs(ex[pos])**(1./n[pos])/(1.+a[pos]+a[pos]**2)**((n[pos]-1.)/2./n[pos])
+	backstress[pos]=T[pos]-(1.-D[pos])*B[pos]*np.sign(ex[pos])*(2.+a[pos])*np.abs(ex[pos])**(1./n[pos])/(1.+a[pos]+a[pos]**2)**((n[pos]-1.)/2./n[pos])
 	
-	pos=npy.nonzero(D<0)
+	pos=np.nonzero(D<0)
 	#mask=ismember(1:md.mesh.numberofvertices,pos);
 	D[pos]=0
 	
 	# backstress to bring negative damage to zero
-	backstress[pos]=T[pos]-(1.-D[pos])*B[pos]*npy.sign(ex[pos])*(2.+a[pos])*npy.abs(ex[pos])**(1./n[pos])/(1.+a[pos]+a[pos]**2)**((n[pos]-1.)/2./n[pos])
+	backstress[pos]=T[pos]-(1.-D[pos])*B[pos]*np.sign(ex[pos])*(2.+a[pos])*np.abs(ex[pos])**(1./n[pos])/(1.+a[pos]+a[pos]**2)**((n[pos]-1.)/2./n[pos])
 	
-	pos=npy.nonzero(backstress<0)
+	pos=np.nonzero(backstress<0)
 	backstress[pos]=0
 	
 	# rigidity from Thomas relation for D=0 and backstress=0
-	B=npy.sign(ex)/(2.+a)*(1.+a+a**2)**((n-1.)/2./n)*T/(npy.abs(ex)**(1./n))
-	pos=npy.nonzero(B<0)
+	B=np.sign(ex)/(2.+a)*(1.+a+a**2)**((n-1.)/2./n)*T/(np.abs(ex)**(1./n))
+	pos=np.nonzero(B<0)
 	B[pos]=md.materials.rheology_B[pos]
 	
Index: /issm/trunk-jpl/src/m/mech/backstressfrominversion.py
===================================================================
--- /issm/trunk-jpl/src/m/mech/backstressfrominversion.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/mech/backstressfrominversion.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from averaging import averaging
 from thomasparams import thomasparams
@@ -65,10 +65,10 @@
 	if tempmask:
 		Bi=md.results.StressbalanceSolution.MaterialsRheologyBbar
-		pos=npy.nonzero(Bi>md.materials.rheology_B)
+		pos=np.nonzero(Bi>md.materials.rheology_B)
 		Bi[pos]=md.materials.rheology_B[pos]
 	
 	# analytical backstress solution
-	backstress=T-Bi*npy.sign(ex0)*(2+a0)*npy.abs(ex0)**(1./n)/((1+a0+a0**2+b0**2)**((n-1.)/2./n))
-	backstress[npy.nonzero(backstress<0)]=0
+	backstress=T-Bi*np.sign(ex0)*(2+a0)*np.abs(ex0)**(1./n)/((1+a0+a0**2+b0**2)**((n-1.)/2./n))
+	backstress[np.nonzero(backstress<0)]=0
 
 	return backstress
Index: /issm/trunk-jpl/src/m/mech/calcbackstress.py
===================================================================
--- /issm/trunk-jpl/src/m/mech/calcbackstress.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/mech/calcbackstress.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from averaging import averaging
 from thomasparams import thomasparams
@@ -61,6 +61,6 @@
 	
 	# analytical backstress solution
-	backstress=T-(1.-D)*B*npy.sign(ex0)*(2+a0)*npy.abs(ex0)**(1./n)/((1+a0+a0**2+b0**2)**((n-1.)/2./n))
-	backstress[npy.nonzero(backstress<0)]=0
+	backstress=T-(1.-D)*B*np.sign(ex0)*(2+a0)*np.abs(ex0)**(1./n)/((1+a0+a0**2+b0**2)**((n-1.)/2./n))
+	backstress[np.nonzero(backstress<0)]=0
 
 	return backstress
Index: /issm/trunk-jpl/src/m/mech/damagefrominversion.py
===================================================================
--- /issm/trunk-jpl/src/m/mech/damagefrominversion.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/mech/damagefrominversion.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 
 def damagefrominversion(md):
@@ -24,20 +24,20 @@
 	if any(md.flowequation.element_equation!=2):
 		raise StandardError('Warning: the model has some non-SSA elements.  These will be treated like SSA elements')
-	if npy.ndim(md.results.StressbalanceSolution.MaterialsRheologyBbar)==2:
+	if np.ndim(md.results.StressbalanceSolution.MaterialsRheologyBbar)==2:
 		Bi=md.results.StressbalanceSolution.MaterialsRheologyBbar.reshape(-1,)
 	else:
 		Bi=md.results.StressbalanceSolution.MaterialsRheologyBbar
-	if npy.ndim(md.materials.rheology_B)==2:
+	if np.ndim(md.materials.rheology_B)==2:
 		BT=md.materials.rheology_B.reshape(-1,)
 	else:
 		BT=md.materials.rheology_B
 
-	damage=npy.zeros_like(Bi)
+	damage=np.zeros_like(Bi)
 
 	# Damage where Bi softer than B(T)
-	pos=npy.nonzero(Bi<BT)[0]
+	pos=np.nonzero(Bi<BT)[0]
 	damage[pos]=1.-Bi[pos]/BT[pos]
 	
-	pos=npy.nonzero(damage<0)
+	pos=np.nonzero(damage<0)
 	damage[pos]=0
 
Index: /issm/trunk-jpl/src/m/mech/mechanicalproperties.py
===================================================================
--- /issm/trunk-jpl/src/m/mech/mechanicalproperties.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/mech/mechanicalproperties.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from GetNodalFunctionsCoeff import GetNodalFunctionsCoeff
 from results import results
@@ -27,5 +27,5 @@
 	#	raise StandardError('only 2D model supported currently')
 
-	if npy.any(md.flowequation.element_equation!=2):
+	if np.any(md.flowequation.element_equation!=2):
 		print 'Warning: the model has some non SSA elements. These will be treated like SSA elements'
 
@@ -35,11 +35,11 @@
             if len(damage)!=md.mesh.numberofvertices:
 		raise ValueError('if damage is supplied it should be of size ' + md.mesh.numberofvertices)
-            if npy.ndim(damage)==2:
+            if np.ndim(damage)==2:
                 damage=damage.reshape(-1,)
         else: damage=None
 
-	if npy.ndim(vx)==2:
+	if np.ndim(vx)==2:
 		vx=vx.reshape(-1,)
-	if npy.ndim(vy)==2:
+	if np.ndim(vy)==2:
 		vy=vy.reshape(-1,)
 	
@@ -48,9 +48,9 @@
 	numberofvertices=md.mesh.numberofvertices
 	index=md.mesh.elements
-	summation=npy.array([[1],[1],[1]])
-	directionsstress=npy.zeros((numberofelements,4))
-	directionsstrain=npy.zeros((numberofelements,4))
-	valuesstress=npy.zeros((numberofelements,2))
-	valuesstrain=npy.zeros((numberofelements,2))
+	summation=np.array([[1],[1],[1]])
+	directionsstress=np.zeros((numberofelements,4))
+	directionsstrain=np.zeros((numberofelements,4))
+	valuesstress=np.zeros((numberofelements,2))
+	valuesstrain=np.zeros((numberofelements,2))
 	
 	#compute nodal functions coefficients N(x,y)=alpha x + beta y +gamma
@@ -60,34 +60,34 @@
 	vxlist=vx[index-1]/md.constants.yts
 	vylist=vy[index-1]/md.constants.yts
-	ux=npy.dot((vxlist*alpha),summation).reshape(-1,)
-	uy=npy.dot((vxlist*beta),summation).reshape(-1,)
-	vx=npy.dot((vylist*alpha),summation).reshape(-1,)
-	vy=npy.dot((vylist*beta),summation).reshape(-1,)
+	ux=np.dot((vxlist*alpha),summation).reshape(-1,)
+	uy=np.dot((vxlist*beta),summation).reshape(-1,)
+	vx=np.dot((vylist*alpha),summation).reshape(-1,)
+	vy=np.dot((vylist*beta),summation).reshape(-1,)
 	uyvx=(vx+uy)/2.
 	#clear vxlist vylist
 	
 	#compute viscosity
-	nu=npy.zeros((numberofelements,))
-	B_bar=npy.dot(md.materials.rheology_B[index-1],summation/3.).reshape(-1,)
+	nu=np.zeros((numberofelements,))
+	B_bar=np.dot(md.materials.rheology_B[index-1],summation/3.).reshape(-1,)
 	power=((md.materials.rheology_n-1.)/(2.*md.materials.rheology_n)).reshape(-1,)
 	second_inv=(ux**2.+vy**2.+((uy+vx)**2.)/4.+ux*vy).reshape(-1,)
 	
 	#some corrections
-	location=npy.nonzero(npy.logical_and(second_inv==0,power!=0))
+	location=np.nonzero(np.logical_and(second_inv==0,power!=0))
 	nu[location]=10^18 	#arbitrary maximum viscosity to apply where there is no effective shear
 	
 	if 'matice' in md.materials.__module__:
-		location=npy.nonzero(second_inv)
+		location=np.nonzero(second_inv)
 		nu[location]=B_bar[location]/(second_inv[location]**power[location])
-		location=npy.nonzero(npy.logical_and(second_inv==0,power==0))
+		location=np.nonzero(np.logical_and(second_inv==0,power==0))
 		nu[location]=B_bar[location]
-		location=npy.nonzero(npy.logical_and(second_inv==0,power!=0))
+		location=np.nonzero(np.logical_and(second_inv==0,power!=0))
 		nu[location]=10^18
 	elif 'matdamageice' in md.materials.__module__ and damage is not None:
 		print 'computing damage-dependent properties!'
-		Zinv=npy.dot(1-damage[index-1],summation/3.).reshape(-1,)
-		location=npy.nonzero(second_inv)
-		nu[location]=Zinv[location]*B_bar[location]/npy.power(second_inv[location],power[location])
-		location=npy.nonzero(npy.logical_and(second_inv==0,power==0))
+		Zinv=np.dot(1-damage[index-1],summation/3.).reshape(-1,)
+		location=np.nonzero(second_inv)
+		nu[location]=Zinv[location]*B_bar[location]/np.power(second_inv[location],power[location])
+		location=np.nonzero(np.logical_and(second_inv==0,power==0))
 		nu[location]=Zinv[location]*B_bar[location]
 		#clear Zinv
@@ -101,12 +101,12 @@
 	
 	#compute principal properties of stress
-	for i in npy.arange(numberofelements):
+	for i in np.arange(numberofelements):
 	
 		#compute stress and strainrate matrices
-		stress=npy.array([ [tau_xx[i], tau_xy[i]], [tau_xy[i], tau_yy[i]] ])
-		strain=npy.array([ [ux[i], uyvx[i]], [uyvx[i], vy[i]] ])
+		stress=np.array([ [tau_xx[i], tau_xy[i]], [tau_xy[i], tau_yy[i]] ])
+		strain=np.array([ [ux[i], uyvx[i]], [uyvx[i], vy[i]] ])
 	
 		#eigenvalues and vectors for stress
-		value,directions=npy.linalg.eig(stress);
+		value,directions=np.linalg.eig(stress);
 		idx=abs(value).argsort()[::-1] # sort in descending order
 		value=value[idx]
@@ -116,5 +116,5 @@
 
 		#eigenvalues and vectors for strain
-		value,directions=npy.linalg.eig(strain);
+		value,directions=np.linalg.eig(strain);
 		idx=abs(value).argsort()[::-1] # sort in descending order
 		value=value[idx]
@@ -133,5 +133,5 @@
 	stress.principalvalue2=valuesstress[:,1]
 	stress.principalaxis2=directionsstress[:,2:4]
-	stress.effectivevalue=1./npy.sqrt(2.)*npy.sqrt(stress.xx**2+stress.yy**2+2.*stress.xy**2)
+	stress.effectivevalue=1./np.sqrt(2.)*np.sqrt(stress.xx**2+stress.yy**2+2.*stress.xy**2)
 	md.results.stress=stress
 	
@@ -144,5 +144,5 @@
 	strainrate.principalvalue2=valuesstrain[:,1]*md.constants.yts 
 	strainrate.principalaxis2=directionsstrain[:,2:4]
-	strainrate.effectivevalue=1./npy.sqrt(2.)*npy.sqrt(strainrate.xx**2+strainrate.yy**2+2.*strainrate.xy**2)
+	strainrate.effectivevalue=1./np.sqrt(2.)*np.sqrt(strainrate.xx**2+strainrate.yy**2+2.*strainrate.xy**2)
 	md.results.strainrate=strainrate
 	
@@ -155,5 +155,5 @@
 	deviatoricstress.principalvalue2=valuesstress[:,1]
 	deviatoricstress.principalaxis2=directionsstress[:,2:4]
-	deviatoricstress.effectivevalue=1./npy.sqrt(2.)*npy.sqrt(stress.xx**2+stress.yy**2+2.*stress.xy**2)
+	deviatoricstress.effectivevalue=1./np.sqrt(2.)*np.sqrt(stress.xx**2+stress.yy**2+2.*stress.xy**2)
 	md.results.deviatoricstress=deviatoricstress
 
Index: /issm/trunk-jpl/src/m/mech/robintemperature.py
===================================================================
--- /issm/trunk-jpl/src/m/mech/robintemperature.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/mech/robintemperature.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from scipy.special import erf
 
@@ -34,7 +34,7 @@
 	
 	#create vertical coordinate variable
-	zstar=npy.sqrt(2.*alphaT*thickness/accumrate)
+	zstar=np.sqrt(2.*alphaT*thickness/accumrate)
 	
-	tprofile=surftemp+npy.sqrt(2.*thickness*npy.pi/accumrate/alphaT)*(-heatflux)/2./rho/c*(erf(z/zstar)-erf(thickness/zstar))
+	tprofile=surftemp+np.sqrt(2.*thickness*np.pi/accumrate/alphaT)*(-heatflux)/2./rho/c*(erf(z/zstar)-erf(thickness/zstar))
 
 	return tprofile	
Index: /issm/trunk-jpl/src/m/mech/steadystateiceshelftemp.py
===================================================================
--- /issm/trunk-jpl/src/m/mech/steadystateiceshelftemp.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/mech/steadystateiceshelftemp.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 
 def steadystateiceshelftemp(md,surfacetemp,basaltemp):
@@ -42,20 +42,20 @@
 	temperature=(Ts+Tb)/2  # where wi~=0
 	
-	pos=npy.nonzero(abs(wi)>=1e-4) # to avoid division by zero
+	pos=np.nonzero(abs(wi)>=1e-4) # to avoid division by zero
 
-	npy.seterr(over='raise',divide='raise') # raise errors if floating point exceptions are encountered in following calculation
+	np.seterr(over='raise',divide='raise') # raise errors if floating point exceptions are encountered in following calculation
 	#calculate depth-averaged temperature (in Celsius)
 	try:
-		temperature[pos]=-( (Tb[pos]-Ts[pos])*ki/wi[pos] + Hi[pos]*Tb[pos] - (Hi[pos]*Ts[pos] + (Tb[pos]-Ts[pos])*ki/wi[pos])*npy.exp(Hi[pos]*wi[pos]/ki) )/( Hi[pos]*(npy.exp(Hi[pos]*wi[pos]/ki)-1))
+		temperature[pos]=-( (Tb[pos]-Ts[pos])*ki/wi[pos] + Hi[pos]*Tb[pos] - (Hi[pos]*Ts[pos] + (Tb[pos]-Ts[pos])*ki/wi[pos])*np.exp(Hi[pos]*wi[pos]/ki) )/( Hi[pos]*(np.exp(Hi[pos]*wi[pos]/ki)-1))
 	except FloatingPointError:
 		print 'steadystateiceshelf warning: overflow encountered in multipy/divide/exp, trying another formulation.' 
-		temperature[pos]=-( ((Tb[pos]-Ts[pos])*ki/wi[pos] + Hi[pos]*Tb[pos])/npy.exp(Hi[pos]*wi[pos]/ki) - Hi[pos]*Ts[pos] + (Tb[pos]-Ts[pos])*ki/wi[pos])/( Hi[pos]*(1-npy.exp(-Hi[pos]*wi[pos]/ki)))
+		temperature[pos]=-( ((Tb[pos]-Ts[pos])*ki/wi[pos] + Hi[pos]*Tb[pos])/np.exp(Hi[pos]*wi[pos]/ki) - Hi[pos]*Ts[pos] + (Tb[pos]-Ts[pos])*ki/wi[pos])/( Hi[pos]*(1-np.exp(-Hi[pos]*wi[pos]/ki)))
 	
 	#temperature should not be less than surface temp
-	pos=npy.nonzero(temperature<Ts)
+	pos=np.nonzero(temperature<Ts)
 	temperature[pos]=Ts[pos]
 	
 	# NaN where melt rates are too high (infinity/infinity in exponential)
-	pos=npy.nonzero(npy.isnan(temperature))
+	pos=np.nonzero(np.isnan(temperature))
 	temperature[pos]=Ts[pos]
 	
Index: /issm/trunk-jpl/src/m/mech/thomasparams.py
===================================================================
--- /issm/trunk-jpl/src/m/mech/thomasparams.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/mech/thomasparams.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from averaging import averaging
 
@@ -75,10 +75,10 @@
 	
 	# checks: any of e1 or e2 equal to zero?
-	pos=npy.nonzero(e1==0)
-	if npy.any(pos==1):
+	pos=np.nonzero(e1==0)
+	if np.any(pos==1):
 		print 'WARNING: first principal strain rate equal to zero.  Value set to 1e-13 s^-1'
 		e1[pos]=1.e-13
-	pos=npy.nonzero(e2==0)
-	if npy.any(pos==1):
+	pos=np.nonzero(e2==0)
+	if np.any(pos==1):
 		print 'WARNING: second principal strain rate equal to zero.  Value set to 1e-13 s^-1'
 		e2[pos]=1.e-13
@@ -89,19 +89,19 @@
 	
 	if coordsys=='principal':
-		b=npy.zeros((md.mesh.numberofvertices,))
+		b=np.zeros((md.mesh.numberofvertices,))
 		ex=e1
 		a=e2/e1
-		pos=npy.nonzero(npy.logical_and(e1<0,e2>0)) # longitudinal compression and lateral tension
+		pos=np.nonzero(np.logical_and(e1<0,e2>0)) # longitudinal compression and lateral tension
 		a[pos]=e1[pos]/e2[pos]
 		ex[pos]=e2[pos]
-		pos2=npy.nonzero(e1<0 & e2<0 & npy.abs(e1)<npy.abs(e2)) # lateral and longitudinal compression
+		pos2=np.nonzero((e1<0) & (e2<0) & (np.abs(e1)<np.abs(e2))) # lateral and longitudinal compression
 		a[pos2]=e1[pos2]/e2[pos2]
 		ex[pos2]=e2[pos2]
-		pos3=npy.nonzero(e1>0 & e2>0 & npy.abs(e1)<npy.abs(e2)) # lateral and longitudinal tension
+		pos3=np.nonzero((e1>0) & (e2>0) & (np.abs(e1)<np.abs(e2))) # lateral and longitudinal tension
 		a[pos3]=e1[pos3]/e2[pos3]
 		ex[pos3]=e2[pos3]
-		ind=npy.nonzero(e1<0 & e2<0)
+		ind=np.nonzero((e1<0) & (e2<0))
 		a[ind]=-a[ind] # where both strain rates are compressive, enforce negative alpha
-		sigxx=(npy.abs(ex)/((1.+a+a**2)**((n-1.)/2.)))**(1./n)*B
+		sigxx=(np.abs(ex)/((1.+a+a**2)**((n-1.)/2.)))**(1./n)*B
 	elif coordsys=='xy':
 		ex=exx
@@ -110,10 +110,10 @@
 	elif coordsys=='longitudinal':
 		# using longitudinal strain rates defined by observed velocity vector
-		velangle=npy.arctan(md.initialization.vy/md.initialization.vx)
-		pos=npy.nonzero(md.initialization.vx==0)
-		velangle[pos]=npy.pi/2
-		ex=0.5*(exx+eyy)+0.5*(exx-eyy)*npy.cos(2.*velangle)+exy*npy.sin(2.*velangle)
+		velangle=np.arctan(md.initialization.vy/md.initialization.vx)
+		pos=np.nonzero(md.initialization.vx==0)
+		velangle[pos]=np.pi/2
+		ex=0.5*(exx+eyy)+0.5*(exx-eyy)*np.cos(2.*velangle)+exy*np.sin(2.*velangle)
 		ey=exx+eyy-ex # trace of strain rate tensor is invariant
-		exy=-0.5*(exx-eyy)*npy.sin(2.*velangle)+exy*npy.cos(2.*velangle)
+		exy=-0.5*(exx-eyy)*np.sin(2.*velangle)+exy*np.cos(2.*velangle)
 		a=ey/ex
 		b=exy/ex
@@ -124,5 +124,5 @@
 	# a < -1 in areas of strong lateral compression or longitudinal compression and
 	# theta flips sign at a = -2
-	pos=npy.nonzero(npy.abs((npy.abs(a)-2.))<1.e-3)
+	pos=np.nonzero(np.abs((np.abs(a)-2.))<1.e-3)
 	if len(pos)>0:
 		print 'Warning: ', len(pos), ' vertices have alpha within 1e-3 of -2'
@@ -131,10 +131,10 @@
 	if eq=='Weertman1D':
 		theta=1./8
-		a=npy.zeros((md.mesh.numberofvertices,))
+		a=np.zeros((md.mesh.numberofvertices,))
 	elif eq=='Weertman2D':
 		theta=1./9
-		a=npy.ones((md.mesh.numberofvertices,))
+		a=np.ones((md.mesh.numberofvertices,))
 	elif eq=='Thomas':
-		theta=((1.+a+a**2+b**2)**((n-1.)/2.))/(npy.abs(2.+a)**n)
+		theta=((1.+a+a**2+b**2)**((n-1.)/2.))/(np.abs(2.+a)**n)
 	else:
 		raise ValueError('argument passed to "eq" not valid')
Index: /issm/trunk-jpl/src/m/plot/applyoptions.py
===================================================================
--- /issm/trunk-jpl/src/m/plot/applyoptions.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/plot/applyoptions.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from cmaptools import truncate_colormap
 from plot_contour import plot_contour
@@ -223,13 +223,13 @@
 			cb.locator=MaxNLocator(nbins=5) # default 5 ticks
 		if options.exist('colorbartickspacing'):
-			locs=npy.arange(lims[0],lims[1]+1,options.getfieldvalue('colorbartickspacing'))
+			locs=np.arange(lims[0],lims[1]+1,options.getfieldvalue('colorbartickspacing'))
 			cb.set_ticks(locs)
 		if options.exist('colorbarlines'):
-			locs=npy.arange(lims[0],lims[1]+1,options.getfieldvalue('colorbarlines'))
-			cb.add_lines(locs,['k' for i in range(len(locs))],npy.ones_like(locs))
+			locs=np.arange(lims[0],lims[1]+1,options.getfieldvalue('colorbarlines'))
+			cb.add_lines(locs,['k' for i in range(len(locs))],np.ones_like(locs))
 		if options.exist('colorbarlineatvalue'):
 			locs=options.getfieldvalue('colorbarlineatvalue')
 			colors=options.getfieldvalue('colorbarlineatvaluecolor',['k' for i in range (len(locs))])
-			widths=options.getfieldvalue('colorbarlineatvaluewidth',npy.ones_like(locs))
+			widths=options.getfieldvalue('colorbarlineatvaluewidth',np.ones_like(locs))
 			cb.add_lines(locs,colors,widths)
 		if options.exist('colorbartitle'):
Index: /issm/trunk-jpl/src/m/plot/colormaps/cmaptools.py
===================================================================
--- /issm/trunk-jpl/src/m/plot/colormaps/cmaptools.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/plot/colormaps/cmaptools.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 
 try:
@@ -21,5 +21,5 @@
 
 	new_cmap = mpl.colors.LinearSegmentedColormap.from_list('trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name,
-		a=minval, b=maxval), cmap(npy.linspace(minval, maxval, n)))
+		a=minval, b=maxval), cmap(np.linspace(minval, maxval, n)))
 	
 	return new_cmap
Index: /issm/trunk-jpl/src/m/plot/plot_contour.py
===================================================================
--- /issm/trunk-jpl/src/m/plot/plot_contour.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/plot/plot_contour.py	(revision 21254)
@@ -26,5 +26,5 @@
 		pass
 	elif datatype==3: # quiver (vector) data
-		data=npy.sqrt(datain**2)
+		data=np.sqrt(datain**2)
 	else:
 		raise ValueError('datatype not supported in call to plot_contour')
Index: /issm/trunk-jpl/src/m/plot/plot_overlay.py
===================================================================
--- /issm/trunk-jpl/src/m/plot/plot_overlay.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/plot/plot_overlay.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from processmesh import processmesh
 from processdata import processdata
@@ -28,5 +28,5 @@
 	if data=='none' or data==None:
 		imageonly=1
-		data=npy.float('nan')*npy.ones((md.mesh.numberofvertices,))
+		data=np.float('nan')*np.ones((md.mesh.numberofvertices,))
 		datatype=1
 	else:
@@ -70,5 +70,5 @@
 
 	# normalize array
-	arr=arr/npy.float(npy.max(arr.ravel()))
+	arr=arr/np.float(np.max(arr.ravel()))
         arr=1.-arr # somehow the values got flipped
 
@@ -96,7 +96,7 @@
 	dy=trans[5]	
 	
-	xarr=npy.arange(xmin,xmax,dx)
-	yarr=npy.arange(ymin,ymax,-dy) # -dy since origin='upper' (not sure how robust this is)
-	xg,yg=npy.meshgrid(xarr,yarr)
+	xarr=np.arange(xmin,xmax,dx)
+	yarr=np.arange(ymin,ymax,-dy) # -dy since origin='upper' (not sure how robust this is)
+	xg,yg=np.meshgrid(xarr,yarr)
 	overlaylims=options.getfieldvalue('overlaylims',[min(arr.ravel()),max(arr.ravel())])
 	norm=mpl.colors.Normalize(vmin=overlaylims[0],vmax=overlaylims[1])
@@ -118,13 +118,13 @@
                 #test
                 #m.ax=ax
-	        meridians=npy.arange(-180.,181.,1.)
-	        parallels=npy.arange(-80.,80.,1.)
+	        meridians=np.arange(-180.,181.,1.)
+	        parallels=np.arange(-80.,80.,1.)
 	        m.drawparallels(parallels,labels=[0,0,1,1]) # labels=[left,right,top,bottom]
 	        m.drawmeridians(meridians,labels=[1,1,0,0])
                 m.drawcoastlines()
-	        pc=m.pcolormesh(xg, yg, npy.flipud(arr), cmap=mpl.cm.Greys, norm=norm, ax=ax)
+	        pc=m.pcolormesh(xg, yg, np.flipud(arr), cmap=mpl.cm.Greys, norm=norm, ax=ax)
 
 	else:
-	        pc=ax.pcolormesh(xg, yg, npy.flipud(arr), cmap=mpl.cm.Greys, norm=norm)
+	        pc=ax.pcolormesh(xg, yg, np.flipud(arr), cmap=mpl.cm.Greys, norm=norm)
         
 	#rasterization? 
Index: /issm/trunk-jpl/src/m/plot/plot_streamlines.py
===================================================================
--- /issm/trunk-jpl/src/m/plot/plot_streamlines.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/plot/plot_streamlines.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from processmesh import processmesh
 from processdata import processdata
@@ -45,5 +45,5 @@
 
     # format data for matplotlib streamplot function
-    yg,xg=npy.mgrid[min(md.mesh.y):max(md.mesh.y):100j,min(md.mesh.x):max(md.mesh.x):100j]
+    yg,xg=np.mgrid[min(md.mesh.y):max(md.mesh.y):100j,min(md.mesh.x):max(md.mesh.x):100j]
     ug=griddata((x,y),u,(xg,yg),method='linear')
     vg=griddata((x,y),v,(xg,yg),method='linear')
@@ -60,6 +60,6 @@
     if linewidth=='vel':
         scale=options.getfieldvalue('streamlineswidthscale',3)
-        vel=npy.sqrt(ug**2+vg**2)
-        linewidth=scale*vel/npy.amax(vel)
+        vel=np.sqrt(ug**2+vg**2)
+        linewidth=scale*vel/np.amax(vel)
         linewidth[linewidth<0.5]=0.5
 
Index: /issm/trunk-jpl/src/m/plot/plotmodel.py
===================================================================
--- /issm/trunk-jpl/src/m/plot/plotmodel.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/plot/plotmodel.py	(revision 21254)
@@ -1,3 +1,3 @@
-import numpy as npy
+import numpy as np
 from plotoptions import plotoptions
 
@@ -34,5 +34,5 @@
 		nr=True
 	else:
-		nrows=npy.ceil(numberofplots/subplotwidth)
+		nrows=np.ceil(numberofplots/subplotwidth)
 		nr=False
 	
Index: /issm/trunk-jpl/src/m/plot/processmesh.py
===================================================================
--- /issm/trunk-jpl/src/m/plot/processmesh.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/plot/processmesh.py	(revision 21254)
@@ -1,4 +1,4 @@
 from math import isnan
-import numpy as npy
+import numpy as np
 
 def processmesh(md,data,options):
@@ -35,5 +35,5 @@
 			z=md.mesh.z
 		except AttributeError:
-			z=npy.zeros_like(md.mesh.x)
+			z=np.zeros_like(md.mesh.x)
 		
 		try:
Index: /issm/trunk-jpl/src/m/solve/parseresultsfromdisk.py
===================================================================
--- /issm/trunk-jpl/src/m/solve/parseresultsfromdisk.py	(revision 21253)
+++ /issm/trunk-jpl/src/m/solve/parseresultsfromdisk.py	(revision 21254)
@@ -3,16 +3,14 @@
 from collections import OrderedDict
 import results as resultsclass
-import MatlabFuncs as m
 
 def parseresultsfromdisk(md,filename,iosplit):
 	if iosplit:
-		results=parseresultsfromdiskiosplit(md,filename)
+		saveres=parseresultsfromdiskiosplit(md,filename)
 	else:
-		results=parseresultsfromdiskioserial(md,filename)
-
-	return results
+		saveres=parseresultsfromdiskioserial(md,filename)
+
+	return saveres
 
 def parseresultsfromdiskioserial(md,filename):    # {{{
-
 	#Open file
 	try:
@@ -22,64 +20,57 @@
 
 	#initialize results: 
-	results=[]
-	results.append(None)
+	saveres=[]
 
 	#Read fields until the end of the file.
-	result=ReadData(fid,md)
+	loadres=ReadData(fid,md)
 
 	counter=0
 	check_nomoresteps=0
-	step=result['step']
-
-	while result:
-
+	step=loadres['step']
+	#print ('step is now {}'.format(step))
+
+	while loadres:
+		#check that the new result does not add a step, which would be an error: 
 		if check_nomoresteps:
-			#check that the new result does not add a step, which would be an error: 
-			if result['step']>=1:
+			if loadres['step']>=1:
 				raise TypeError("parsing results for a steady-state core, which incorporates transient results!")
 
 		#Check step, increase counter if this is a new step
-		if(step!=result['step'] and result['step']>1):
+		if(step!=loadres['step'] and loadres['step']>1):
 			counter = counter + 1
-			step    = result['step']
-
-		#Add result
-		if result['step']==0:
+			step    = loadres['step']
+
+		#Add result
+		if loadres['step']==0:
 			#if we have a step = 0, this is a steady state solution, don't expect more steps. 
 			index = 0;
 			check_nomoresteps=1
-	
-		elif result['step']==1:
+		elif loadres['step']==1:
 			index = 0
 		else:
 			index = counter;
-	
-		if index > len(results)-1:
-			for i in xrange(len(results)-1,index-1):
-				results.append(None)
-			results.append(resultsclass.results())
 		
-		elif results[index] is None:
-			results[index]=resultsclass.results()
-
+		if index > len(saveres)-1:
+			for i in xrange(len(saveres)-1,index-1):
+				saveres.append(None)
+			saveres.append(resultsclass.results())
+		elif saveres[index] is None:
+			saveres[index]=resultsclass.results()
 			
 		#Get time and step
-		if result['step'] != -9999.:
-			setattr(results[index],'step',result['step'])
-		if result['time'] != -9999.:
-			setattr(results[index],'time',result['time']) 
-	
-		#Add result
-		if hasattr(results[index],result['fieldname']) and not m.strcmp(result['fieldname'],'SolutionType'):
-			setattr(results[index],result['fieldname'],numpy.vstack((getattr(results[index],result['fieldname']),result['field'])))
-		else:
-			setattr(results[index],result['fieldname'],result['field'])
-
-		#read next result
-		result=ReadData(fid,md)
+		if loadres['step'] != -9999.:
+			saveres[index].__dict__['step']=loadres['step']
+		if loadres['time'] != -9999.:
+			saveres[index].__dict__['time']=loadres['time']
+
+		#Add result
+		saveres[index].__dict__[loadres['fieldname']]=loadres['field']
+
+		#read next result
+		loadres=ReadData(fid,md)
 
 	fid.close()
 
-	return results
+	return saveres
 	# }}}
 def parseresultsfromdiskiosplit(md,filename):    # {{{
@@ -91,56 +82,56 @@
 		raise IOError("loadresultsfromdisk error message: could not open '%s' for binary reading." % filename)
 
-	results=[]
+	saveres=[]
 
 	#if we have done split I/O, ie, we have results that are fragmented across patches, 
 	#do a first pass, and figure out the structure of results
-	result=ReadDataDimensions(fid)
-	while result:
+	loadres=ReadDataDimensions(fid)
+	while loadres:
 
 		#Get time and step
-		if result['step'] > len(results):
-			for i in xrange(len(results),result['step']-1):
-				results.append(None)
-			results.append(resultsclass.results())
-		setattr(results[result['step']-1],'step',result['step'])
-		setattr(results[result['step']-1],'time',result['time']) 
-
-		#Add result
-		setattr(results[result['step']-1],result['fieldname'],float('NaN'))
-
-		#read next result
-		result=ReadDataDimensions(fid)
+		if loadres['step'] > len(saveres):
+			for i in xrange(len(saveres),loadres['step']-1):
+				saveres.append(None)
+			saveres.append(resultsclass.results())
+		setattr(saveres[loadres['step']-1],'step',loadres['step'])
+		setattr(saveres[loadres['step']-1],'time',loadres['time']) 
+
+		#Add result
+		setattr(saveres[loadres['step']-1],loadres['fieldname'],float('NaN'))
+
+		#read next result
+		loadres=ReadDataDimensions(fid)
 
 	#do a second pass, and figure out the size of the patches
 	fid.seek(0)    #rewind
-	result=ReadDataDimensions(fid)
-	while result:
-
-		#read next result
-		result=ReadDataDimensions(fid)
+	loadres=ReadDataDimensions(fid)
+	while loadres:
+
+		#read next result
+		loadres=ReadDataDimensions(fid)
 
 	#third pass, this time to read the real information
 	fid.seek(0)    #rewind
-	result=ReadData(fid,md)
-	while result:
+	loadres=ReadData(fid,md)
+	while loadres:
 
 		#Get time and step
-		if result['step']> len(results):
-			for i in xrange(len(results),result['step']-1):
-				results.append(None)
-			results.append(resultsclass.results())
-		setattr(results[result['step']-1],'step',result['step'])
-		setattr(results[result['step']-1],'time',result['time']) 
-
-		#Add result
-		setattr(results[result['step']-1],result['fieldname'],result['field'])
-
-		#read next result
-		result=ReadData(fid,md)
+		if loadres['step']> len(saveres):
+			for i in xrange(len(saveres),loadres['step']-1):
+				saveres.append(None)
+			saveres.append(resultsclass.results())
+		setattr(saveres[loadres['step']-1],'step',loadres['step'])
+		setattr(saveres[loadres['step']-1],'time',loadres['time']) 
+
+		#Add result
+		setattr(saveres[loadres['step']-1],loadres['fieldname'],loadres['field'])
+
+		#read next result
+		loadres=ReadData(fid,md)
 
 	#close file
 	fid.close()
 
-	return results
+	return saveres
 	# }}}
 def ReadData(fid,md):    # {{{
@@ -177,43 +168,43 @@
 		#Process units here FIXME: this should not be done here!
 		yts=md.constants.yts
-		if m.strcmp(fieldname,'BalancethicknessThickeningRate'):
-			field = field*yts
-		elif m.strcmp(fieldname,'Time'):
+		if fieldname=='BalancethicknessThickeningRate':
+			field = field*yts
+		elif fieldname=='Time':
 			field = field/yts
-		elif m.strcmp(fieldname,'HydrologyWaterVx'):
-			field = field*yts
-		elif m.strcmp(fieldname,'HydrologyWaterVy'):
-			field = field*yts
-		elif m.strcmp(fieldname,'Vx'):
-			field = field*yts
-		elif m.strcmp(fieldname,'Vy'):
-			field = field*yts
-		elif m.strcmp(fieldname,'Vz'):
-			field = field*yts
-		elif m.strcmp(fieldname,'Vel'):
-			field = field*yts
-		elif m.strcmp(fieldname,'BasalforcingsGroundediceMeltingRate'):
-			field = field*yts
-		elif m.strcmp(fieldname,'TotalFloatingBmb'):
+		elif fieldname=='HydrologyWaterVx':
+			field = field*yts
+		elif fieldname=='HydrologyWaterVy':
+			field = field*yts
+		elif fieldname=='Vx':
+			field = field*yts
+		elif fieldname=='Vy':
+			field = field*yts
+		elif fieldname=='Vz':
+			field = field*yts
+		elif fieldname=='Vel':
+			field = field*yts
+		elif fieldname=='BasalforcingsGroundediceMeltingRate':
+			field = field*yts
+		elif fieldname=='TotalFloatingBmb':
 			field = field/10.**12.*yts #(GigaTon/year)
-		elif m.strcmp(fieldname,'TotalGroundedBmb'):
+		elif fieldname=='TotalGroundedBmb':
 			field = field/10.**12.*yts #(GigaTon/year)
-		elif m.strcmp(fieldname,'TotalSmb'):
+		elif fieldname=='TotalSmb':
 			field = field/10.**12.*yts #(GigaTon/year)
-		elif m.strcmp(fieldname,'SmbMassBalance'):
-			field = field*yts
-		elif m.strcmp(fieldname,'CalvingCalvingrate'):
-			field = field*yts
-
-		result=OrderedDict()
-		result['fieldname']=fieldname
-		result['time']=time
-		result['step']=step
-		result['field']=field
+		elif fieldname=='SmbMassBalance':
+			field = field*yts
+		elif fieldname=='CalvingCalvingrate':
+			field = field*yts
+
+		saveres=OrderedDict()
+		saveres['fieldname']=fieldname
+		saveres['time']=time
+		saveres['step']=step
+		saveres['field']=field
 
 	except struct.error as e:
-		result=None
-
-	return result
+		saveres=None
+
+	return saveres
 	# }}}
 def ReadDataDimensions(fid):    # {{{
@@ -246,14 +237,14 @@
 			raise TypeError("cannot read data of type %d" % type)
 
-		result=OrderedDict()
-		result['fieldname']=fieldname
-		result['time']=time
-		result['step']=step
-		result['M']=M
-		result['N']=N
+		saveres=OrderedDict()
+		saveres['fieldname']=fieldname
+		saveres['time']=time
+		saveres['step']=step
+		saveres['M']=M
+		saveres['N']=N
 
 	except struct.error as e:
-		result=None
-
-	return result
-	# }}}
+		saveres=None
+
+	return saveres
+	# }}}
